Add python-eventlet package to MOS 8.0 repository
author Ivan Udovichenko <iudovichenko@mirantis.com>
Tue, 15 Sep 2015 14:41:36 +0000 (17:41 +0300)
committer Ivan Udovichenko <iudovichenko@mirantis.com>
Tue, 15 Sep 2015 14:41:45 +0000 (17:41 +0300)
Change-Id: I107626b90de56c28a33eb470469f4635bea2acfc
Version: 0.17.4-2~u14.04+mos1
Source: http://http.debian.net/debian/pool/main/p/python-eventlet/python-eventlet_0.17.4-2.dsc

259 files changed:
debian/changelog
debian/control
debian/copyright
debian/gbp.conf [new file with mode: 0644]
debian/links [deleted file]
debian/patches/enforce-tlsv1-always.patch [new file with mode: 0644]
debian/patches/fix-FTBFS-on-sphinx-build.patch
debian/patches/fixed-privacy-breach-in-examples.patch [new file with mode: 0644]
debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch [new file with mode: 0644]
debian/patches/remove-self.assert-in-tests.patcher_test.py.patch
debian/patches/series
debian/patches/set-defaults-to-be-tlsv1-not-sslv23.patch [new file with mode: 0644]
debian/patches/skip-failing-tests.patch [deleted file]
debian/patches/use-packaged-python-mock-rather-than-embedded.patch
debian/python-eventlet-doc.doc-base [moved from debian/doc-base with 50% similarity]
debian/python-eventlet-doc.docs [moved from debian/docs with 100% similarity]
debian/rules
eventlet/PKG-INFO [deleted file]
eventlet/eventlet.egg-info/PKG-INFO [deleted file]
eventlet/eventlet.egg-info/SOURCES.txt [deleted file]
eventlet/eventlet.egg-info/dependency_links.txt [deleted file]
eventlet/eventlet.egg-info/not-zip-safe [deleted file]
eventlet/eventlet.egg-info/requires.txt [deleted file]
eventlet/eventlet.egg-info/top_level.txt [deleted file]
eventlet/eventlet/green/_socket_nodns.py [deleted file]
eventlet/eventlet/green/urllib.py [deleted file]
eventlet/eventlet/support/greendns.py [deleted file]
eventlet/eventlet/support/greenlets.py [deleted file]
eventlet/setup.cfg [deleted file]
eventlet/tests/greendns_test.py [deleted file]
eventlet/tests/mysqldb_test_monkey_patch.py [deleted file]
python-eventlet/AUTHORS [moved from eventlet/AUTHORS with 88% similarity]
python-eventlet/LICENSE [moved from eventlet/LICENSE with 100% similarity]
python-eventlet/MANIFEST.in [moved from eventlet/MANIFEST.in with 100% similarity]
python-eventlet/NEWS [moved from eventlet/NEWS with 95% similarity]
python-eventlet/README.rst [moved from eventlet/README.rst with 94% similarity]
python-eventlet/benchmarks/__init__.py [new file with mode: 0644]
python-eventlet/benchmarks/context.py [new file with mode: 0644]
python-eventlet/benchmarks/hub_timers.py [new file with mode: 0644]
python-eventlet/benchmarks/localhost_socket.py [new file with mode: 0644]
python-eventlet/benchmarks/spawn.py [new file with mode: 0644]
python-eventlet/benchmarks/spawn_plot.py [new file with mode: 0644]
python-eventlet/bin/build-website.bash [new file with mode: 0755]
python-eventlet/bin/release [new file with mode: 0755]
python-eventlet/doc/Makefile [moved from eventlet/doc/Makefile with 100% similarity]
python-eventlet/doc/_templates/layout.html [new file with mode: 0644]
python-eventlet/doc/authors.rst [moved from eventlet/doc/authors.rst with 100% similarity]
python-eventlet/doc/basic_usage.rst [moved from eventlet/doc/basic_usage.rst with 100% similarity]
python-eventlet/doc/common.txt [moved from eventlet/doc/common.txt with 100% similarity]
python-eventlet/doc/conf.py [moved from eventlet/doc/conf.py with 100% similarity]
python-eventlet/doc/design_patterns.rst [moved from eventlet/doc/design_patterns.rst with 100% similarity]
python-eventlet/doc/environment.rst [moved from eventlet/doc/environment.rst with 100% similarity]
python-eventlet/doc/examples.rst [moved from eventlet/doc/examples.rst with 100% similarity]
python-eventlet/doc/history.rst [moved from eventlet/doc/history.rst with 100% similarity]
python-eventlet/doc/hubs.rst [moved from eventlet/doc/hubs.rst with 100% similarity]
python-eventlet/doc/images/threading_illustration.png [moved from eventlet/doc/images/threading_illustration.png with 100% similarity]
python-eventlet/doc/index.rst [moved from eventlet/doc/index.rst with 100% similarity]
python-eventlet/doc/make.bat [new file with mode: 0644]
python-eventlet/doc/modules.rst [moved from eventlet/doc/modules.rst with 100% similarity]
python-eventlet/doc/modules/backdoor.rst [moved from eventlet/doc/modules/backdoor.rst with 100% similarity]
python-eventlet/doc/modules/corolocal.rst [moved from eventlet/doc/modules/corolocal.rst with 100% similarity]
python-eventlet/doc/modules/db_pool.rst [moved from eventlet/doc/modules/db_pool.rst with 100% similarity]
python-eventlet/doc/modules/debug.rst [moved from eventlet/doc/modules/debug.rst with 100% similarity]
python-eventlet/doc/modules/event.rst [moved from eventlet/doc/modules/event.rst with 100% similarity]
python-eventlet/doc/modules/greenpool.rst [moved from eventlet/doc/modules/greenpool.rst with 100% similarity]
python-eventlet/doc/modules/greenthread.rst [moved from eventlet/doc/modules/greenthread.rst with 100% similarity]
python-eventlet/doc/modules/pools.rst [moved from eventlet/doc/modules/pools.rst with 100% similarity]
python-eventlet/doc/modules/queue.rst [moved from eventlet/doc/modules/queue.rst with 100% similarity]
python-eventlet/doc/modules/semaphore.rst [moved from eventlet/doc/modules/semaphore.rst with 100% similarity]
python-eventlet/doc/modules/timeout.rst [moved from eventlet/doc/modules/timeout.rst with 100% similarity]
python-eventlet/doc/modules/websocket.rst [moved from eventlet/doc/modules/websocket.rst with 100% similarity]
python-eventlet/doc/modules/wsgi.rst [moved from eventlet/doc/modules/wsgi.rst with 100% similarity]
python-eventlet/doc/modules/zmq.rst [moved from eventlet/doc/modules/zmq.rst with 100% similarity]
python-eventlet/doc/patching.rst [moved from eventlet/doc/patching.rst with 100% similarity]
python-eventlet/doc/real_index.html [new file with mode: 0644]
python-eventlet/doc/ssl.rst [moved from eventlet/doc/ssl.rst with 100% similarity]
python-eventlet/doc/testing.rst [moved from eventlet/doc/testing.rst with 100% similarity]
python-eventlet/doc/threading.rst [moved from eventlet/doc/threading.rst with 100% similarity]
python-eventlet/doc/zeromq.rst [moved from eventlet/doc/zeromq.rst with 100% similarity]
python-eventlet/eventlet/__init__.py [moved from eventlet/eventlet/__init__.py with 98% similarity]
python-eventlet/eventlet/backdoor.py [moved from eventlet/eventlet/backdoor.py with 100% similarity]
python-eventlet/eventlet/convenience.py [moved from eventlet/eventlet/convenience.py with 100% similarity]
python-eventlet/eventlet/corolocal.py [moved from eventlet/eventlet/corolocal.py with 100% similarity]
python-eventlet/eventlet/coros.py [moved from eventlet/eventlet/coros.py with 100% similarity]
python-eventlet/eventlet/db_pool.py [moved from eventlet/eventlet/db_pool.py with 100% similarity]
python-eventlet/eventlet/debug.py [moved from eventlet/eventlet/debug.py with 100% similarity]
python-eventlet/eventlet/event.py [moved from eventlet/eventlet/event.py with 99% similarity]
python-eventlet/eventlet/green/BaseHTTPServer.py [moved from eventlet/eventlet/green/BaseHTTPServer.py with 100% similarity]
python-eventlet/eventlet/green/CGIHTTPServer.py [moved from eventlet/eventlet/green/CGIHTTPServer.py with 100% similarity]
python-eventlet/eventlet/green/MySQLdb.py [moved from eventlet/eventlet/green/MySQLdb.py with 100% similarity]
python-eventlet/eventlet/green/OpenSSL/SSL.py [moved from eventlet/eventlet/green/OpenSSL/SSL.py with 100% similarity]
python-eventlet/eventlet/green/OpenSSL/__init__.py [moved from eventlet/eventlet/green/OpenSSL/__init__.py with 100% similarity]
python-eventlet/eventlet/green/OpenSSL/crypto.py [moved from eventlet/eventlet/green/OpenSSL/crypto.py with 100% similarity]
python-eventlet/eventlet/green/OpenSSL/rand.py [moved from eventlet/eventlet/green/OpenSSL/rand.py with 100% similarity]
python-eventlet/eventlet/green/OpenSSL/tsafe.py [moved from eventlet/eventlet/green/OpenSSL/tsafe.py with 100% similarity]
python-eventlet/eventlet/green/OpenSSL/version.py [moved from eventlet/eventlet/green/OpenSSL/version.py with 100% similarity]
python-eventlet/eventlet/green/Queue.py [moved from eventlet/eventlet/green/Queue.py with 100% similarity]
python-eventlet/eventlet/green/SimpleHTTPServer.py [moved from eventlet/eventlet/green/SimpleHTTPServer.py with 100% similarity]
python-eventlet/eventlet/green/SocketServer.py [moved from eventlet/eventlet/green/SocketServer.py with 100% similarity]
python-eventlet/eventlet/green/__init__.py [moved from eventlet/eventlet/green/__init__.py with 100% similarity]
python-eventlet/eventlet/green/_socket_nodns.py [new file with mode: 0644]
python-eventlet/eventlet/green/asynchat.py [moved from eventlet/eventlet/green/asynchat.py with 100% similarity]
python-eventlet/eventlet/green/asyncore.py [moved from eventlet/eventlet/green/asyncore.py with 100% similarity]
python-eventlet/eventlet/green/builtin.py [moved from eventlet/eventlet/green/builtin.py with 100% similarity]
python-eventlet/eventlet/green/ftplib.py [moved from eventlet/eventlet/green/ftplib.py with 100% similarity]
python-eventlet/eventlet/green/http/__init__.py [new file with mode: 0644]
python-eventlet/eventlet/green/http/client.py [new file with mode: 0644]
python-eventlet/eventlet/green/http/cookiejar.py [new file with mode: 0644]
python-eventlet/eventlet/green/http/cookies.py [new file with mode: 0644]
python-eventlet/eventlet/green/http/server.py [new file with mode: 0644]
python-eventlet/eventlet/green/httplib.py [moved from eventlet/eventlet/green/httplib.py with 100% similarity]
python-eventlet/eventlet/green/os.py [moved from eventlet/eventlet/green/os.py with 100% similarity]
python-eventlet/eventlet/green/profile.py [moved from eventlet/eventlet/green/profile.py with 100% similarity]
python-eventlet/eventlet/green/select.py [moved from eventlet/eventlet/green/select.py with 100% similarity]
python-eventlet/eventlet/green/selectors.py [new file with mode: 0644]
python-eventlet/eventlet/green/socket.py [moved from eventlet/eventlet/green/socket.py with 92% similarity]
python-eventlet/eventlet/green/ssl.py [moved from eventlet/eventlet/green/ssl.py with 95% similarity]
python-eventlet/eventlet/green/subprocess.py [moved from eventlet/eventlet/green/subprocess.py with 93% similarity]
python-eventlet/eventlet/green/thread.py [moved from eventlet/eventlet/green/thread.py with 68% similarity]
python-eventlet/eventlet/green/threading.py [moved from eventlet/eventlet/green/threading.py with 100% similarity]
python-eventlet/eventlet/green/time.py [moved from eventlet/eventlet/green/time.py with 100% similarity]
python-eventlet/eventlet/green/urllib/__init__.py [new file with mode: 0644]
python-eventlet/eventlet/green/urllib/error.py [new file with mode: 0644]
python-eventlet/eventlet/green/urllib/parse.py [new file with mode: 0644]
python-eventlet/eventlet/green/urllib/request.py [new file with mode: 0644]
python-eventlet/eventlet/green/urllib/response.py [new file with mode: 0644]
python-eventlet/eventlet/green/urllib2.py [moved from eventlet/eventlet/green/urllib2.py with 100% similarity]
python-eventlet/eventlet/green/zmq.py [moved from eventlet/eventlet/green/zmq.py with 99% similarity]
python-eventlet/eventlet/greenio/__init__.py [new file with mode: 0644]
python-eventlet/eventlet/greenio/base.py [moved from eventlet/eventlet/greenio.py with 71% similarity]
python-eventlet/eventlet/greenio/py2.py [new file with mode: 0644]
python-eventlet/eventlet/greenio/py3.py [new file with mode: 0644]
python-eventlet/eventlet/greenpool.py [moved from eventlet/eventlet/greenpool.py with 100% similarity]
python-eventlet/eventlet/greenthread.py [moved from eventlet/eventlet/greenthread.py with 100% similarity]
python-eventlet/eventlet/hubs/__init__.py [moved from eventlet/eventlet/hubs/__init__.py with 100% similarity]
python-eventlet/eventlet/hubs/epolls.py [moved from eventlet/eventlet/hubs/epolls.py with 100% similarity]
python-eventlet/eventlet/hubs/hub.py [moved from eventlet/eventlet/hubs/hub.py with 100% similarity]
python-eventlet/eventlet/hubs/kqueue.py [moved from eventlet/eventlet/hubs/kqueue.py with 100% similarity]
python-eventlet/eventlet/hubs/poll.py [moved from eventlet/eventlet/hubs/poll.py with 100% similarity]
python-eventlet/eventlet/hubs/pyevent.py [moved from eventlet/eventlet/hubs/pyevent.py with 100% similarity]
python-eventlet/eventlet/hubs/selects.py [moved from eventlet/eventlet/hubs/selects.py with 100% similarity]
python-eventlet/eventlet/hubs/timer.py [moved from eventlet/eventlet/hubs/timer.py with 100% similarity]
python-eventlet/eventlet/patcher.py [moved from eventlet/eventlet/patcher.py with 94% similarity]
python-eventlet/eventlet/pools.py [moved from eventlet/eventlet/pools.py with 100% similarity]
python-eventlet/eventlet/queue.py [moved from eventlet/eventlet/queue.py with 100% similarity]
python-eventlet/eventlet/semaphore.py [moved from eventlet/eventlet/semaphore.py with 90% similarity]
python-eventlet/eventlet/support/__init__.py [moved from eventlet/eventlet/support/__init__.py with 100% similarity]
python-eventlet/eventlet/support/greendns.py [new file with mode: 0644]
python-eventlet/eventlet/support/greenlets.py [new file with mode: 0644]
python-eventlet/eventlet/support/psycopg2_patcher.py [moved from eventlet/eventlet/support/psycopg2_patcher.py with 100% similarity]
python-eventlet/eventlet/support/pylib.py [moved from eventlet/eventlet/support/pylib.py with 100% similarity]
python-eventlet/eventlet/support/six.py [moved from eventlet/eventlet/support/six.py with 100% similarity]
python-eventlet/eventlet/support/stacklesspypys.py [moved from eventlet/eventlet/support/stacklesspypys.py with 100% similarity]
python-eventlet/eventlet/support/stacklesss.py [moved from eventlet/eventlet/support/stacklesss.py with 100% similarity]
python-eventlet/eventlet/timeout.py [moved from eventlet/eventlet/timeout.py with 100% similarity]
python-eventlet/eventlet/tpool.py [moved from eventlet/eventlet/tpool.py with 97% similarity]
python-eventlet/eventlet/websocket.py [moved from eventlet/eventlet/websocket.py with 100% similarity]
python-eventlet/eventlet/wsgi.py [moved from eventlet/eventlet/wsgi.py with 94% similarity]
python-eventlet/examples/chat_bridge.py [moved from eventlet/examples/chat_bridge.py with 100% similarity]
python-eventlet/examples/chat_server.py [moved from eventlet/examples/chat_server.py with 100% similarity]
python-eventlet/examples/connect.py [moved from eventlet/examples/connect.py with 100% similarity]
python-eventlet/examples/distributed_websocket_chat.py [moved from eventlet/examples/distributed_websocket_chat.py with 100% similarity]
python-eventlet/examples/echoserver.py [moved from eventlet/examples/echoserver.py with 100% similarity]
python-eventlet/examples/feedscraper-testclient.py [moved from eventlet/examples/feedscraper-testclient.py with 100% similarity]
python-eventlet/examples/feedscraper.py [moved from eventlet/examples/feedscraper.py with 100% similarity]
python-eventlet/examples/forwarder.py [moved from eventlet/examples/forwarder.py with 100% similarity]
python-eventlet/examples/producer_consumer.py [moved from eventlet/examples/producer_consumer.py with 100% similarity]
python-eventlet/examples/recursive_crawler.py [moved from eventlet/examples/recursive_crawler.py with 100% similarity]
python-eventlet/examples/webcrawler.py [moved from eventlet/examples/webcrawler.py with 100% similarity]
python-eventlet/examples/websocket.html [moved from eventlet/examples/websocket.html with 100% similarity]
python-eventlet/examples/websocket.py [moved from eventlet/examples/websocket.py with 100% similarity]
python-eventlet/examples/websocket_chat.html [moved from eventlet/examples/websocket_chat.html with 100% similarity]
python-eventlet/examples/websocket_chat.py [moved from eventlet/examples/websocket_chat.py with 100% similarity]
python-eventlet/examples/wsgi.py [moved from eventlet/examples/wsgi.py with 100% similarity]
python-eventlet/examples/zmq_chat.py [moved from eventlet/examples/zmq_chat.py with 100% similarity]
python-eventlet/examples/zmq_simple.py [moved from eventlet/examples/zmq_simple.py with 100% similarity]
python-eventlet/setup.cfg [new file with mode: 0644]
python-eventlet/setup.py [moved from eventlet/setup.py with 86% similarity]
python-eventlet/tests/README [new file with mode: 0644]
python-eventlet/tests/__init__.py [moved from eventlet/tests/__init__.py with 82% similarity]
python-eventlet/tests/api_test.py [moved from eventlet/tests/api_test.py with 99% similarity]
python-eventlet/tests/backdoor_test.py [moved from eventlet/tests/backdoor_test.py with 100% similarity]
python-eventlet/tests/convenience_test.py [moved from eventlet/tests/convenience_test.py with 100% similarity]
python-eventlet/tests/db_pool_test.py [moved from eventlet/tests/db_pool_test.py with 100% similarity]
python-eventlet/tests/debug_test.py [moved from eventlet/tests/debug_test.py with 100% similarity]
python-eventlet/tests/env_test.py [moved from eventlet/tests/env_test.py with 100% similarity]
python-eventlet/tests/event_test.py [moved from eventlet/tests/event_test.py with 100% similarity]
python-eventlet/tests/fork_test.py [moved from eventlet/tests/fork_test.py with 100% similarity]
python-eventlet/tests/greendns_test.py [new file with mode: 0644]
python-eventlet/tests/greenio_test.py [moved from eventlet/tests/greenio_test.py with 94% similarity]
python-eventlet/tests/greenpipe_test_with_statement.py [moved from eventlet/tests/greenpipe_test_with_statement.py with 100% similarity]
python-eventlet/tests/greenpool_test.py [moved from eventlet/tests/greenpool_test.py with 99% similarity]
python-eventlet/tests/greenthread_test.py [moved from eventlet/tests/greenthread_test.py with 100% similarity]
python-eventlet/tests/hub_test.py [moved from eventlet/tests/hub_test.py with 98% similarity]
python-eventlet/tests/hub_test_fork.py [moved from eventlet/tests/hub_test_fork.py with 100% similarity]
python-eventlet/tests/isolated/__init__.py [moved from eventlet/tests/manual/__init__.py with 100% similarity]
python-eventlet/tests/isolated/greendns_from_address_203.py [new file with mode: 0644]
python-eventlet/tests/isolated/greenio_double_close_219.py [new file with mode: 0644]
python-eventlet/tests/isolated/mysqldb_monkey_patch.py [new file with mode: 0644]
python-eventlet/tests/isolated/patcher_importlib_lock.py [moved from eventlet/tests/patcher_test_importlib_lock.py with 96% similarity]
python-eventlet/tests/isolated/patcher_threading_condition.py [new file with mode: 0644]
python-eventlet/tests/isolated/patcher_threading_join.py [new file with mode: 0644]
python-eventlet/tests/isolated/wsgi_connection_timeout.py [moved from eventlet/tests/wsgi_test_conntimeout.py with 53% similarity]
python-eventlet/tests/manual/__init__.py [new file with mode: 0644]
python-eventlet/tests/manual/greenio_memtest.py [moved from eventlet/tests/manual/greenio_memtest.py with 100% similarity]
python-eventlet/tests/manual/regress-226-unpatched-ssl.py [new file with mode: 0644]
python-eventlet/tests/mock.py [moved from eventlet/tests/mock.py with 99% similarity]
python-eventlet/tests/mysqldb_test.py [moved from eventlet/tests/mysqldb_test.py with 91% similarity]
python-eventlet/tests/nosewrapper.py [moved from eventlet/tests/nosewrapper.py with 100% similarity]
python-eventlet/tests/parse_results.py [moved from eventlet/tests/parse_results.py with 100% similarity]
python-eventlet/tests/patcher_psycopg_test.py [moved from eventlet/tests/patcher_psycopg_test.py with 100% similarity]
python-eventlet/tests/patcher_test.py [moved from eventlet/tests/patcher_test.py with 96% similarity]
python-eventlet/tests/pools_test.py [moved from eventlet/tests/pools_test.py with 100% similarity]
python-eventlet/tests/queue_test.py [moved from eventlet/tests/queue_test.py with 100% similarity]
python-eventlet/tests/semaphore_test.py [moved from eventlet/tests/semaphore_test.py with 74% similarity]
python-eventlet/tests/socket_test.py [moved from eventlet/tests/socket_test.py with 100% similarity]
python-eventlet/tests/ssl_test.py [moved from eventlet/tests/ssl_test.py with 100% similarity]
python-eventlet/tests/stdlib/all.py [moved from eventlet/tests/stdlib/all.py with 100% similarity]
python-eventlet/tests/stdlib/all_modules.py [moved from eventlet/tests/stdlib/all_modules.py with 100% similarity]
python-eventlet/tests/stdlib/all_monkey.py [moved from eventlet/tests/stdlib/all_monkey.py with 100% similarity]
python-eventlet/tests/stdlib/test_SimpleHTTPServer.py [moved from eventlet/tests/stdlib/test_SimpleHTTPServer.py with 100% similarity]
python-eventlet/tests/stdlib/test_asynchat.py [moved from eventlet/tests/stdlib/test_asynchat.py with 100% similarity]
python-eventlet/tests/stdlib/test_asyncore.py [moved from eventlet/tests/stdlib/test_asyncore.py with 100% similarity]
python-eventlet/tests/stdlib/test_ftplib.py [moved from eventlet/tests/stdlib/test_ftplib.py with 100% similarity]
python-eventlet/tests/stdlib/test_httplib.py [moved from eventlet/tests/stdlib/test_httplib.py with 100% similarity]
python-eventlet/tests/stdlib/test_httpservers.py [moved from eventlet/tests/stdlib/test_httpservers.py with 100% similarity]
python-eventlet/tests/stdlib/test_os.py [moved from eventlet/tests/stdlib/test_os.py with 100% similarity]
python-eventlet/tests/stdlib/test_queue.py [moved from eventlet/tests/stdlib/test_queue.py with 100% similarity]
python-eventlet/tests/stdlib/test_select.py [moved from eventlet/tests/stdlib/test_select.py with 100% similarity]
python-eventlet/tests/stdlib/test_socket.py [moved from eventlet/tests/stdlib/test_socket.py with 100% similarity]
python-eventlet/tests/stdlib/test_socket_ssl.py [moved from eventlet/tests/stdlib/test_socket_ssl.py with 100% similarity]
python-eventlet/tests/stdlib/test_socketserver.py [moved from eventlet/tests/stdlib/test_socketserver.py with 100% similarity]
python-eventlet/tests/stdlib/test_ssl.py [moved from eventlet/tests/stdlib/test_ssl.py with 100% similarity]
python-eventlet/tests/stdlib/test_subprocess.py [moved from eventlet/tests/stdlib/test_subprocess.py with 100% similarity]
python-eventlet/tests/stdlib/test_thread.py [moved from eventlet/tests/stdlib/test_thread.py with 100% similarity]
python-eventlet/tests/stdlib/test_thread__boundedsem.py [moved from eventlet/tests/stdlib/test_thread__boundedsem.py with 100% similarity]
python-eventlet/tests/stdlib/test_threading.py [moved from eventlet/tests/stdlib/test_threading.py with 100% similarity]
python-eventlet/tests/stdlib/test_threading_local.py [moved from eventlet/tests/stdlib/test_threading_local.py with 100% similarity]
python-eventlet/tests/stdlib/test_timeout.py [moved from eventlet/tests/stdlib/test_timeout.py with 100% similarity]
python-eventlet/tests/stdlib/test_urllib.py [moved from eventlet/tests/stdlib/test_urllib.py with 100% similarity]
python-eventlet/tests/stdlib/test_urllib2.py [moved from eventlet/tests/stdlib/test_urllib2.py with 100% similarity]
python-eventlet/tests/stdlib/test_urllib2_localnet.py [moved from eventlet/tests/stdlib/test_urllib2_localnet.py with 100% similarity]
python-eventlet/tests/subprocess_test.py [moved from eventlet/tests/subprocess_test.py with 72% similarity]
python-eventlet/tests/test__event.py [moved from eventlet/tests/test__event.py with 100% similarity]
python-eventlet/tests/test__greenness.py [moved from eventlet/tests/test__greenness.py with 79% similarity]
python-eventlet/tests/test__refcount.py [moved from eventlet/tests/test__refcount.py with 100% similarity]
python-eventlet/tests/test__socket_errors.py [moved from eventlet/tests/test__socket_errors.py with 87% similarity]
python-eventlet/tests/test_server.crt [moved from eventlet/tests/test_server.crt with 100% similarity]
python-eventlet/tests/test_server.key [moved from eventlet/tests/test_server.key with 100% similarity]
python-eventlet/tests/thread_test.py [moved from eventlet/tests/thread_test.py with 100% similarity]
python-eventlet/tests/timeout_test.py [moved from eventlet/tests/timeout_test.py with 100% similarity]
python-eventlet/tests/timeout_test_with_statement.py [moved from eventlet/tests/timeout_test_with_statement.py with 100% similarity]
python-eventlet/tests/timer_test.py [moved from eventlet/tests/timer_test.py with 100% similarity]
python-eventlet/tests/tpool_test.py [moved from eventlet/tests/tpool_test.py with 96% similarity]
python-eventlet/tests/websocket_new_test.py [moved from eventlet/tests/websocket_new_test.py with 97% similarity]
python-eventlet/tests/websocket_test.py [moved from eventlet/tests/websocket_test.py with 100% similarity]
python-eventlet/tests/wsgi_test.py [moved from eventlet/tests/wsgi_test.py with 98% similarity]
python-eventlet/tests/zmq_test.py [moved from eventlet/tests/zmq_test.py with 100% similarity]
python-eventlet/tox.ini [new file with mode: 0644]

index f82738abe7b510618f6d790c3895dd7f3971397a..2eee87a9f74bd108d069eb6255bfc3a68c681956 100644 (file)
@@ -1,32 +1,59 @@
-python-eventlet (0.16.1-1~u14.04+mos1) mos7.0; urgency=medium
+python-eventlet (0.17.4-2~u14.04+mos1) mos8.0; urgency=medium
 
-  * python-eventlet 0.16.1 is needed according to Kilo requirements
-    https://github.com/openstack/requirements/blob/stable/kilo/global-requirements.txt#L31
-  * Sources are from https://launchpad.net/ubuntu/+source/python-eventlet/0.16.1-0ubuntu1
+  * Source: http://http.debian.net/debian/pool/main/p/python-eventlet/python-eventlet_0.17.4-2.dsc
 
- -- Daniil Trishkin <dtrishkin@mirantis.com>  Thu, 16 Apr 2015 19:25:49 +0300
+ -- Ivan Udovichenko <iudovichenko@mirantis.com>  Tue, 15 Sep 2015 17:41:33 +0300
 
-python-eventlet (0.15.2-1~u14.04+mos1) mos6.1; urgency=medium
+python-eventlet (0.17.4-2) unstable; urgency=medium
 
-  * Adjust the package revision according to the versioning policy
-    stated in the separate-mos-from-linux blueprint.
+  * Added greenio_send_was_running_empty_loop_on_ENOTCONN.patch.
 
- -- Alexei Sheplyakov <asheplyakov@mirantis.com>  Thu, 09 Apr 2015 14:14:45 +0300
+ -- Thomas Goirand <zigo@debian.org>  Wed, 27 May 2015 21:33:41 +0000
 
-python-eventlet (0.15.2-1ubuntu1~mos6.1+1) trusty; urgency=medium
+python-eventlet (0.17.4-1) unstable; urgency=medium
 
-  * Backport to Ubuntu 14.04 to satisfy OpenStack Juno global requirements
-    (only versions 0.15.[12] are good enough).
-  * Added a patch which fixes ENOTCONN handling and prevents services from
-    hogging CPU when rsyslogd gets restarted.
+  * New upstream release.
+  * Switched to PKG OpenStack <openstack-devel@lists.alioth.debian.org> team:
+    - Updated Maintainer:.
+    - Fixed VCS URLs (now using Git).
+    - Added openstack-pkg-tools as build-depends.
+    - Filled upstream VCS URL in debian/rules.
+  * Updated debian/copyright.
+  * Added dh-python as build-depends.
+  * Ran wrap-and-sort -t -a.
+  * Removed version for python-greenlet build-depends (the required version is
+    even available in Wheezy, and Jessie is out...).
+  * Removed privacy breach in example/websocket*.html
+  * Removed duplicate doc-base registration.
+
+ -- Thomas Goirand <zigo@debian.org>  Wed, 27 May 2015 14:01:28 +0200
+
+python-eventlet (0.17.3-4) unstable; urgency=medium
+
+  * Enforce default protocol to be TLSv1 and not SSLv23.
+
+ -- Thomas Goirand <zigo@debian.org>  Thu, 21 May 2015 17:20:12 +0000
+
+python-eventlet (0.17.3-3) unstable; urgency=medium
 
- -- Alexei Sheplyakov <asheplyakov@mirantis.com>  Fri, 13 Feb 2015 16:22:37 +0300
+  * Enforce TLSv1 always, instead of the more permissive SSLv23.
+  * Standards-Version bumped to 3.9.6.
 
-python-eventlet (0.15.2-1ubuntu1) vivid; urgency=medium
+ -- Thomas Goirand <zigo@debian.org>  Thu, 21 May 2015 17:09:29 +0000
 
-  * debian/control: python-zmq is optional 
+python-eventlet (0.17.3-2) unstable; urgency=medium
+
+  * Activates --with python3 which was missing.
+
+ -- Thomas Goirand <zigo@debian.org>  Sat, 09 May 2015 20:58:47 +0000
+
+python-eventlet (0.17.3-1) unstable; urgency=medium
+
+  * New upstream release.
+  * Uploading to unstable.
+  * Added Python 3 package, since upstream has it now.
 
- -- Chuck Short <zulcss@ubuntu.com>  Thu, 13 Nov 2014 08:49:43 -0500
+ -- Thomas Goirand <zigo@debian.org>  Sun, 08 Feb 2015 12:34:57 +0000
 
 python-eventlet (0.15.2-1) experimental; urgency=medium
 
index 54d0b6f66b17fe836897b7afec386faf5f60da2c..821eb1a4683c76749fa189350acc814047456b89 100644 (file)
@@ -1,22 +1,39 @@
 Source: python-eventlet
 Section: python
 Priority: optional
-Maintainer: Mirantis OpenStack Team <mos@mirantis.com>
+Maintainer: PKG OpenStack <openstack-devel@lists.alioth.debian.org>
+Uploaders: Laszlo Boszormenyi (GCS) <gcs@debian.hu>,
+           Thomas Goirand <zigo@debian.org>,
 Build-Depends: debhelper (>= 9),
-               python-all (>= 2.7.1),
+               dh-python,
+               openstack-pkg-tools,
+               python-all,
                python-setuptools,
-               python-sphinx (>= 1.0.7+dfsg)
-Build-Depends-Indep: python-greenlet (>= 0.3.1-2.1),
+               python-sphinx,
+               python3-all,
+               python3-setuptools,
+Build-Depends-Indep: python-greenlet,
                      python-httplib2,
                      python-mock,
                      python-nose,
-                     python-openssl
-Standards-Version: 3.9.5
+                     python-openssl,
+                     python-zmq,
+                     python3-greenlet,
+                     python3-httplib2,
+                     python3-mock,
+                     python3-nose,
+                     python3-openssl,
+                     python3-zmq,
+Standards-Version: 3.9.6
+Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/python-eventlet.git;a=summary
+Vcs-Git: git://anonscm.debian.org/openstack/python-eventlet.git
 Homepage: http://eventlet.net
 
 Package: python-eventlet
 Architecture: all
-Depends: ${misc:Depends}, ${python:Depends}, ${sphinxdoc:Depends}
+Depends: python-greenlet,
+         ${misc:Depends},
+         ${python:Depends},
 Description: concurrent networking library - Python 2.x
  Eventlet allows you to change how you run your code, not how you write it.
  .
@@ -31,3 +48,46 @@ Description: concurrent networking library - Python 2.x
  patterns, and the list of the basic API primitives.
  .
  This package provides the Python 2.x module.
+
+Package: python3-eventlet
+Architecture: all
+Depends: python3-greenlet,
+         ${misc:Depends},
+         ${python3:Depends},
+         ${sphinxdoc:Depends},
+Description: concurrent networking library - Python 3.x
+ Eventlet allows you to change how you run your code, not how you write it.
+ .
+ It uses epoll or libevent for highly scalable non-blocking I/O.
+ Coroutines ensure that the developer uses a blocking style of programming
+ that is similar to threading, but provide the benefits of non-blocking I/O.
+ The event dispatch is implicit, which means you can easily use Eventlet from
+ the Python interpreter, or as a small part of a larger application.
+ .
+ It's easy to get started using Eventlet, and easy to convert existing
+ applications to use it. Start off by looking at examples, common design
+ patterns, and the list of the basic API primitives.
+ .
+ This package provides the Python 3.x module.
+
+Package: python-eventlet-doc
+Architecture: all
+Section: doc
+Breaks: python-eventlet (<< 0.17.4)
+Replaces: python-eventlet (<< 0.17.4)
+Depends: ${misc:Depends},
+         ${sphinxdoc:Depends},
+Description: concurrent networking library - doc
+ Eventlet allows you to change how you run your code, not how you write it.
+ .
+ It uses epoll or libevent for highly scalable non-blocking I/O.
+ Coroutines ensure that the developer uses a blocking style of programming
+ that is similar to threading, but provide the benefits of non-blocking I/O.
+ The event dispatch is implicit, which means you can easily use Eventlet from
+ the Python interpreter, or as a small part of a larger application.
+ .
+ It's easy to get started using Eventlet, and easy to convert existing
+ applications to use it. Start off by looking at examples, common design
+ patterns, and the list of the basic API primitives.
+ .
+ This package provides the documentation.
index 3fc56b65448306c195763e88561600ab34901b6f..4af787c33dade7541125539342314cf64ce66b1a 100644 (file)
@@ -101,10 +101,11 @@ License: voidspace-bsd
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 Files: debian/*
-Copyright:
- 2010,      Monty Taylor <mordred@inaugust.com>
- 2010-2011, Soren Hansen <soren@ubuntu.com>
- 2011-2012, Stefano Rivera <stefanor@debian.org>
+Copyright: (c) 2010, Monty Taylor <mordred@inaugust.com>
+           (c) 2010-2011, Soren Hansen <soren@ubuntu.com>
+           (c) 2011-2012, Stefano Rivera <stefanor@debian.org>
+           (c) 2013-2015, Thomas Goirand <zigo@debian.org>
+           (c) 2012-2013, Laszlo Boszormenyi (GCS) <gcs@debian.hu>
 License: generic-bsd
 
 License: generic-bsd
diff --git a/debian/gbp.conf b/debian/gbp.conf
new file mode 100644 (file)
index 0000000..7bf5959
--- /dev/null
@@ -0,0 +1,8 @@
+[DEFAULT]
+upstream-branch = master
+debian-branch = debian/unstable
+upstream-tag = %(version)s
+compression = xz
+
+[buildpackage]
+export-dir = ../build-area/
diff --git a/debian/links b/debian/links
deleted file mode 100644 (file)
index 5a81f86..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/share/doc/python-eventlet/html/_sources /usr/share/doc/python-eventlet/rst
diff --git a/debian/patches/enforce-tlsv1-always.patch b/debian/patches/enforce-tlsv1-always.patch
new file mode 100644 (file)
index 0000000..4616a7c
--- /dev/null
@@ -0,0 +1,18 @@
+Description: Always enforce TLSv1
+ Upstream allows SSLv23, but we don't want this, we want TLSv1 always, as
+ we shouldn't trust lower types of crypto.
+Author: Thomas Goirand <zigo@debian.org>
+Forwarded: not-needed
+Last-Update: 2015-05-21
+
+--- python-eventlet-0.17.3.orig/eventlet/convenience.py
++++ python-eventlet-0.17.3/eventlet/convenience.py
+@@ -139,7 +139,7 @@ except ImportError:
+                           do_handshake_on_connect=True,
+                           suppress_ragged_eofs=True, ciphers=None):
+             # theoretically the ssl_version could be respected in this line
+-            context = SSL.Context(SSL.SSLv23_METHOD)
++            context = SSL.Context(SSL.TLSv1_METHOD)
+             if certfile is not None:
+                 context.use_certificate_file(certfile)
+             if keyfile is not None:
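
The hunk above only touches the pyOpenSSL fallback path inside eventlet/convenience.py. As a minimal sketch of what the patched wrapper now effectively does when it builds its context (assuming pyOpenSSL is available; the certificate and key file names are placeholders):

    from eventlet.green.OpenSSL import SSL

    # With the patch applied, the context is always TLSv1-only rather than the
    # protocol-negotiating SSLv23 one.
    ctx = SSL.Context(SSL.TLSv1_METHOD)
    ctx.use_certificate_file('server.crt')  # placeholder certificate path
    ctx.use_privatekey_file('server.key')   # placeholder key path
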
index 0de8e1525eee8f1900c1e57251f41b48f74422d2..807699126e59d16005e3bc79910cbb1f21509527 100644 (file)
@@ -2,8 +2,8 @@ Description: Removes line that is doing FTBFS in doc
 Author: Thomas Goirand <zigo@debian.org>
 Forwarded: no
 
---- a/doc/modules/zmq.rst
-+++ b/doc/modules/zmq.rst
+--- python-eventlet-0.13.0.orig/doc/modules/zmq.rst
++++ python-eventlet-0.13.0/doc/modules/zmq.rst
 @@ -15,7 +15,6 @@
  
  .. autoclass:: Socket
diff --git a/debian/patches/fixed-privacy-breach-in-examples.patch b/debian/patches/fixed-privacy-breach-in-examples.patch
new file mode 100644 (file)
index 0000000..e714d98
--- /dev/null
@@ -0,0 +1,30 @@
+Description: Fixed privacy breach in examples
+ Upstream is referencing external websites, we don't allow this.
+Author: Thomas Goirand <zigo@debian.org>
+Forwarded: no
+Last-Update: 2015-05-27
+
+--- python-eventlet-0.17.4.orig/examples/websocket.html
++++ python-eventlet-0.17.4/examples/websocket.html
+@@ -3,8 +3,8 @@
+ <head>
+ <!-- idea and code swiped from 
+ http://assorted.svn.sourceforge.net/viewvc/assorted/real-time-plotter/trunk/src/rtp.html?view=markup -->
+-<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.1/jquery.min.js"></script>
+-<script src="http://people.iola.dk/olau/flot/jquery.flot.js"></script>
++<script src="jquery.min.js"></script>
++<script src="jquery.flot.js"></script>
+ <script>
+ window.onload = function() {
+     var data = {};
+--- python-eventlet-0.17.4.orig/examples/websocket_chat.html
++++ python-eventlet-0.17.4/examples/websocket_chat.html
+@@ -1,7 +1,7 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+-<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"></script>
++<script src="jquery.min.js"></script>
+ <script>
+ window.onload = function() {
+   var data = {};
diff --git a/debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch b/debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch
new file mode 100644 (file)
index 0000000..040c109
--- /dev/null
@@ -0,0 +1,29 @@
+Description: greenio: send() was running empty loop on ENOTCONN
+ Thanks to Seyeong Kim
+ https://github.com/eventlet/eventlet/issues/192
+Author: Sergey Shepelev <temotor@gmail.com>
+Date: Fri, 15 May 2015 03:56:04 +0300
+
+diff --git a/AUTHORS b/AUTHORS
+index e0ab0e2..c57f010 100644
+--- a/AUTHORS
++++ b/AUTHORS
+@@ -119,3 +119,4 @@ Thanks To
+ * Sean Dague, wsgi: Provide python logging compatibility
+ * Tim Simmons, Use _socket_nodns and select in dnspython support
+ * Antonio Cuni, fix fd double close on PyPy
++* Seyeong Kim
+diff --git a/eventlet/greenio/base.py b/eventlet/greenio/base.py
+index 8da51ca..1e43176 100644
+--- a/eventlet/greenio/base.py
++++ b/eventlet/greenio/base.py
+@@ -358,7 +358,8 @@ def send(self, data, flags=0):
+             try:
+                 total_sent += fd.send(data[total_sent:], flags)
+             except socket.error as e:
+-                if get_errno(e) not in SOCKET_BLOCKING:
++                eno = get_errno(e)
++                if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
+                     raise
+             if total_sent == len_data:
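
The behavioural change above is small but significant: with the patch, ENOTCONN is always treated as fatal inside send()'s retry loop, so a disconnected socket raises instead of spinning in an empty loop. An illustrative restatement of the fixed rule (not eventlet source; SOCKET_BLOCKING here stands in for eventlet's own set of retryable errnos):

    import errno

    SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK}  # stand-in for eventlet's set

    def should_retry_send(exc):
        eno = exc.errno
        if eno == errno.ENOTCONN:
            return False               # peer endpoint gone: surface the error
        return eno in SOCKET_BLOCKING  # socket merely not ready: wait and retry
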
index d7ffc3dee66c9f0ed006e3ea7f2ab55deb31c981..2946ca1441aa93ac1a17a3c90d11b4780ae29c61 100644 (file)
@@ -3,9 +3,11 @@ Author: Thomas Goirand <zigo@debian.org>
 Forwarded: no
 Last-Update: 2014-09-07
 
---- a/tests/patcher_test.py
-+++ b/tests/patcher_test.py
-@@ -321,7 +321,7 @@ print(len(_threading._active))
+Index: python-eventlet-0.17.3/tests/patcher_test.py
+===================================================================
+--- python-eventlet-0.17.3.orig/tests/patcher_test.py
++++ python-eventlet-0.17.3/tests/patcher_test.py
+@@ -325,7 +325,7 @@ print(len(_threading._active))
          self.assertEqual(len(lines), 4, "\n".join(lines))
          assert lines[0].startswith('<Thread'), lines[0]
          self.assertEqual(lines[1], "1", lines[1])
@@ -14,7 +16,7 @@ Last-Update: 2014-09-07
  
      def test_threading(self):
          new_mod = """import eventlet
-@@ -352,7 +352,7 @@ print(len(threading._active))
+@@ -356,7 +356,7 @@ print(len(threading._active))
  """
          self.write_to_tempfile("newmod", new_mod)
          output, lines = self.launch_subprocess('newmod')
index ad2fe58632824ced6e1d14eb803c547fb452cd0f..92e3d2191d6f9520815557bee6d7c9d54f8ae2e9 100644 (file)
@@ -1,4 +1,7 @@
-skip-failing-tests.patch
 remove-self.assert-in-tests.patcher_test.py.patch
 fix-FTBFS-on-sphinx-build.patch
 use-packaged-python-mock-rather-than-embedded.patch
+enforce-tlsv1-always.patch
+set-defaults-to-be-tlsv1-not-sslv23.patch
+fixed-privacy-breach-in-examples.patch
+greenio_send_was_running_empty_loop_on_ENOTCONN.patch
diff --git a/debian/patches/set-defaults-to-be-tlsv1-not-sslv23.patch b/debian/patches/set-defaults-to-be-tlsv1-not-sslv23.patch
new file mode 100644 (file)
index 0000000..3df7176
--- /dev/null
@@ -0,0 +1,17 @@
+Description: Do not use SSLv23 by default, but TLSv1
+ Default protocol should really be TLSv1 and not SSLv23.
+Author: Thomas Goirand <zigo@debian.org>
+Forwarded: no
+Last-Update: 2015-05-21
+
+--- python-eventlet-0.17.3.orig/eventlet/green/ssl.py
++++ python-eventlet-0.17.3/eventlet/green/ssl.py
+@@ -46,7 +46,7 @@ class GreenSSLSocket(_original_sslsocket
+     def __init__(self, sock, keyfile=None, certfile=None,
+                  server_side=False, cert_reqs=CERT_NONE,
+-                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
++                 ssl_version=PROTOCOL_TLSv1, ca_certs=None,
+                  do_handshake_on_connect=True, *args, **kw):
+         if not isinstance(sock, GreenSocket):
+             sock = GreenSocket(sock)
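
Both TLS-related patches change library defaults only; applications keep the last word because the protocol can be passed explicitly. A minimal sketch using eventlet's public listen/wrap_ssl helpers (the certificate and key paths are placeholders):

    import ssl
    import eventlet

    listener = eventlet.listen(('0.0.0.0', 8443))
    # Passing ssl_version explicitly overrides whatever default the package ships.
    server_sock = eventlet.wrap_ssl(listener,
                                    certfile='server.crt',  # placeholder paths
                                    keyfile='server.key',
                                    server_side=True,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
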
diff --git a/debian/patches/skip-failing-tests.patch b/debian/patches/skip-failing-tests.patch
deleted file mode 100644 (file)
index 5f7816f..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
---- a/tests/greenio_test.py
-+++ b/tests/greenio_test.py
-@@ -761,6 +761,7 @@ class TestGreenPipe(LimitedTestCase):
- class TestGreenIoLong(LimitedTestCase):
-     TEST_TIMEOUT = 10  # the test here might take a while depending on the OS
-+    @skipped
-     @skip_with_pyevent
-     def test_multiple_readers(self, clibufsize=False):
-         debug.hub_prevent_multiple_readers(False)
index 2bbc30119bde572c6f8649d4c303a7a5dae01768..41654e8d56fda0dfc4f7b1e9e6d0e6f988a58202 100644 (file)
@@ -2,10 +2,10 @@ Description: Use the packaged python-mock package
  Upstream is "vendorizing" mock. This sux...
 Author: Thomas Goirand <zigo@debian.org>
 Forwarded: no
-Last-Update: 2014-09-07
+Last-Update: 2015-02-08
 
---- a/tests/db_pool_test.py
-+++ b/tests/db_pool_test.py
+--- python-eventlet-0.16.1.orig/tests/db_pool_test.py
++++ python-eventlet-0.16.1/tests/db_pool_test.py
 @@ -7,7 +7,8 @@ import os
  import traceback
  from unittest import TestCase, main
@@ -16,8 +16,8 @@ Last-Update: 2014-09-07
  from eventlet import event
  from eventlet import db_pool
  from eventlet.support import six
---- a/tests/websocket_test.py
-+++ b/tests/websocket_test.py
+--- python-eventlet-0.16.1.orig/tests/websocket_test.py
++++ python-eventlet-0.16.1/tests/websocket_test.py
 @@ -8,7 +8,8 @@ from eventlet.green import httplib
  from eventlet.support import six
  from eventlet.websocket import WebSocket, WebSocketWSGI
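
The intent of this patch is to import the Debian-packaged python-mock instead of the copy vendored under eventlet's test suite; the replacement import lines themselves are not visible in the hunks above. A typical fallback import of that kind (illustrative only, not the literal patch text) looks like:

    try:
        from unittest import mock  # Python 3.3+ ships mock in the standard library
    except ImportError:
        import mock                # Debian's python-mock package on Python 2
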
similarity index 50%
rename from debian/doc-base
rename to debian/python-eventlet-doc.doc-base
index 64e4102c307f6b379cdca7938b57df05bf694d63..339232c3adaf8fd4399ba72d7dc80b90a55e7736 100644 (file)
@@ -5,8 +5,5 @@ Abstract: concurrent networking library for Python
 Section: Programming/Python
 
 Format: HTML
-Index: /usr/share/doc/python-eventlet/html/index.html
-Files: /usr/share/doc/python-eventlet/html/*.html
-
-Format: Text
-Files: /usr/share/doc/python-eventlet/rst/index.txt
+Index: /usr/share/doc/python-eventlet-doc/html/index.html
+Files: /usr/share/doc/python-eventlet-doc/html/*
similarity index 100%
rename from debian/docs
rename to debian/python-eventlet-doc.docs
index 32a9363c5e6da427309808b9f31cf4d2dc79835b..12e908db1be2f2da7360ff713e0e3565eba0db74 100755 (executable)
@@ -1,25 +1,26 @@
 #!/usr/bin/make -f
 
 PYTHONS:=$(shell pyversions -vr)
-#PYTHON3S:=$(shell py3versions -vr)
+PYTHON3S:=$(shell py3versions -vr)
+
+UPSTREAM_GIT = git://github.com/eventlet/eventlet.git
+include /usr/share/openstack-pkg-tools/pkgos.make
 
 %:
-       dh $@ -O--buildsystem=python_distutils --with python2,sphinxdoc
-#      dh $@ -O--buildsystem=python_distutils --with python2,python3,sphinxdoc
+       dh $@ -O--buildsystem=python_distutils --with python2,python3,sphinxdoc
 
 override_dh_install:
-       set -e && for pyvers in $(PYTHONS); do \
+       set -e ; for pyvers in $(PYTHONS); do \
                python$$pyvers setup.py install --install-layout=deb \
                        --root $(CURDIR)/debian/python-eventlet; \
        done
-#      set -e && for pyvers in $(PYTHON3S); do \
-#              python$$pyvers setup.py install --install-layout=deb \
-#                      --root $(CURDIR)/debian/python3-eventlet; \
-#      done
+       set -e ; for pyvers in $(PYTHON3S); do \
+               python$$pyvers setup.py install --install-layout=deb \
+                       --root $(CURDIR)/debian/python3-eventlet; \
+       done
 
 override_dh_sphinxdoc:
-       #PYTHONPATH=. sphinx-build -N -b html doc/ debian/python-eventlet-doc/usr/share/doc/python-eventlet-doc/html
-       PYTHONPATH=. sphinx-build -N -b html doc/ debian/python-eventlet/usr/share/doc/python-eventlet/html
+       PYTHONPATH=. sphinx-build -N -b html doc $(CURDIR)/debian/python-eventlet-doc/usr/share/doc/python-eventlet-doc/html
        dh_sphinxdoc -O--buildsystem=python_distutils
 
 override_dh_auto_test:
diff --git a/eventlet/PKG-INFO b/eventlet/PKG-INFO
deleted file mode 100644 (file)
index aff4111..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-Metadata-Version: 1.1
-Name: eventlet
-Version: 0.16.1
-Summary: Highly concurrent networking library
-Home-page: http://eventlet.net
-Author: Linden Lab
-Author-email: eventletdev@lists.secondlife.com
-License: UNKNOWN
-Description: Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
-        
-        It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
-        
-        It's easy to get started using Eventlet, and easy to convert existing
-        applications to use it.  Start off by looking at the `examples`_,
-        `common design patterns`_, and the list of `basic API primitives`_.
-        
-        .. _examples: http://eventlet.net/doc/examples.html
-        .. _common design patterns: http://eventlet.net/doc/design_patterns.html
-        .. _basic API primitives: http://eventlet.net/doc/basic_usage.html
-        
-        
-        Quick Example
-        ===============
-        
-        Here's something you can try right on the command line::
-        
-            % python
-            >>> import eventlet
-            >>> from eventlet.green import urllib2
-            >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
-            >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
-            >>> gt2.wait()
-            >>> gt.wait()
-        
-        
-        Getting Eventlet
-        ==================
-        
-        The easiest way to get Eventlet is to use pip::
-        
-          pip install eventlet
-        
-        The development `tip`_ is available as well::
-        
-          pip install 'eventlet==dev'
-        
-        .. _tip: http://bitbucket.org/eventlet/eventlet/get/tip.zip#egg=eventlet-dev
-        
-        
-        Building the Docs Locally
-        =========================
-        
-        To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`)::
-        
-          cd doc
-          make html
-        
-        The built html files can be found in doc/_build/html afterward.
-        
-        
-        Twisted
-        =======
-        
-        Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time,
-        now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration.
-        
-        If you have a project that uses Eventlet with Twisted, your options are:
-        
-        * use last working release eventlet==0.14
-        * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13,
-        `EVENTLET_HUB` environment variable can point to external modules.
-        * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project.
-        
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Topic :: Internet
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Intended Audience :: Developers
-Classifier: Development Status :: 4 - Beta
diff --git a/eventlet/eventlet.egg-info/PKG-INFO b/eventlet/eventlet.egg-info/PKG-INFO
deleted file mode 100644 (file)
index aff4111..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-Metadata-Version: 1.1
-Name: eventlet
-Version: 0.16.1
-Summary: Highly concurrent networking library
-Home-page: http://eventlet.net
-Author: Linden Lab
-Author-email: eventletdev@lists.secondlife.com
-License: UNKNOWN
-Description: Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
-        
-        It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
-        
-        It's easy to get started using Eventlet, and easy to convert existing
-        applications to use it.  Start off by looking at the `examples`_,
-        `common design patterns`_, and the list of `basic API primitives`_.
-        
-        .. _examples: http://eventlet.net/doc/examples.html
-        .. _common design patterns: http://eventlet.net/doc/design_patterns.html
-        .. _basic API primitives: http://eventlet.net/doc/basic_usage.html
-        
-        
-        Quick Example
-        ===============
-        
-        Here's something you can try right on the command line::
-        
-            % python
-            >>> import eventlet
-            >>> from eventlet.green import urllib2
-            >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
-            >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
-            >>> gt2.wait()
-            >>> gt.wait()
-        
-        
-        Getting Eventlet
-        ==================
-        
-        The easiest way to get Eventlet is to use pip::
-        
-          pip install eventlet
-        
-        The development `tip`_ is available as well::
-        
-          pip install 'eventlet==dev'
-        
-        .. _tip: http://bitbucket.org/eventlet/eventlet/get/tip.zip#egg=eventlet-dev
-        
-        
-        Building the Docs Locally
-        =========================
-        
-        To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`)::
-        
-          cd doc
-          make html
-        
-        The built html files can be found in doc/_build/html afterward.
-        
-        
-        Twisted
-        =======
-        
-        Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time,
-        now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration.
-        
-        If you have a project that uses Eventlet with Twisted, your options are:
-        
-        * use last working release eventlet==0.14
-        * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13,
-        `EVENTLET_HUB` environment variable can point to external modules.
-        * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project.
-        
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Topic :: Internet
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Intended Audience :: Developers
-Classifier: Development Status :: 4 - Beta
diff --git a/eventlet/eventlet.egg-info/SOURCES.txt b/eventlet/eventlet.egg-info/SOURCES.txt
deleted file mode 100644 (file)
index 9124c3d..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-AUTHORS
-LICENSE
-MANIFEST.in
-NEWS
-README.rst
-setup.cfg
-setup.py
-doc/Makefile
-doc/authors.rst
-doc/basic_usage.rst
-doc/common.txt
-doc/conf.py
-doc/design_patterns.rst
-doc/environment.rst
-doc/examples.rst
-doc/history.rst
-doc/hubs.rst
-doc/index.rst
-doc/modules.rst
-doc/patching.rst
-doc/ssl.rst
-doc/testing.rst
-doc/threading.rst
-doc/zeromq.rst
-doc/images/threading_illustration.png
-doc/modules/backdoor.rst
-doc/modules/corolocal.rst
-doc/modules/db_pool.rst
-doc/modules/debug.rst
-doc/modules/event.rst
-doc/modules/greenpool.rst
-doc/modules/greenthread.rst
-doc/modules/pools.rst
-doc/modules/queue.rst
-doc/modules/semaphore.rst
-doc/modules/timeout.rst
-doc/modules/websocket.rst
-doc/modules/wsgi.rst
-doc/modules/zmq.rst
-eventlet/__init__.py
-eventlet/backdoor.py
-eventlet/convenience.py
-eventlet/corolocal.py
-eventlet/coros.py
-eventlet/db_pool.py
-eventlet/debug.py
-eventlet/event.py
-eventlet/greenio.py
-eventlet/greenpool.py
-eventlet/greenthread.py
-eventlet/patcher.py
-eventlet/pools.py
-eventlet/queue.py
-eventlet/semaphore.py
-eventlet/timeout.py
-eventlet/tpool.py
-eventlet/websocket.py
-eventlet/wsgi.py
-eventlet.egg-info/PKG-INFO
-eventlet.egg-info/SOURCES.txt
-eventlet.egg-info/dependency_links.txt
-eventlet.egg-info/not-zip-safe
-eventlet.egg-info/requires.txt
-eventlet.egg-info/top_level.txt
-eventlet/green/BaseHTTPServer.py
-eventlet/green/CGIHTTPServer.py
-eventlet/green/MySQLdb.py
-eventlet/green/Queue.py
-eventlet/green/SimpleHTTPServer.py
-eventlet/green/SocketServer.py
-eventlet/green/__init__.py
-eventlet/green/_socket_nodns.py
-eventlet/green/asynchat.py
-eventlet/green/asyncore.py
-eventlet/green/builtin.py
-eventlet/green/ftplib.py
-eventlet/green/httplib.py
-eventlet/green/os.py
-eventlet/green/profile.py
-eventlet/green/select.py
-eventlet/green/socket.py
-eventlet/green/ssl.py
-eventlet/green/subprocess.py
-eventlet/green/thread.py
-eventlet/green/threading.py
-eventlet/green/time.py
-eventlet/green/urllib.py
-eventlet/green/urllib2.py
-eventlet/green/zmq.py
-eventlet/green/OpenSSL/SSL.py
-eventlet/green/OpenSSL/__init__.py
-eventlet/green/OpenSSL/crypto.py
-eventlet/green/OpenSSL/rand.py
-eventlet/green/OpenSSL/tsafe.py
-eventlet/green/OpenSSL/version.py
-eventlet/hubs/__init__.py
-eventlet/hubs/epolls.py
-eventlet/hubs/hub.py
-eventlet/hubs/kqueue.py
-eventlet/hubs/poll.py
-eventlet/hubs/pyevent.py
-eventlet/hubs/selects.py
-eventlet/hubs/timer.py
-eventlet/support/__init__.py
-eventlet/support/greendns.py
-eventlet/support/greenlets.py
-eventlet/support/psycopg2_patcher.py
-eventlet/support/pylib.py
-eventlet/support/six.py
-eventlet/support/stacklesspypys.py
-eventlet/support/stacklesss.py
-examples/chat_bridge.py
-examples/chat_server.py
-examples/connect.py
-examples/distributed_websocket_chat.py
-examples/echoserver.py
-examples/feedscraper-testclient.py
-examples/feedscraper.py
-examples/forwarder.py
-examples/producer_consumer.py
-examples/recursive_crawler.py
-examples/webcrawler.py
-examples/websocket.html
-examples/websocket.py
-examples/websocket_chat.html
-examples/websocket_chat.py
-examples/wsgi.py
-examples/zmq_chat.py
-examples/zmq_simple.py
-tests/__init__.py
-tests/api_test.py
-tests/backdoor_test.py
-tests/convenience_test.py
-tests/db_pool_test.py
-tests/debug_test.py
-tests/env_test.py
-tests/event_test.py
-tests/fork_test.py
-tests/greendns_test.py
-tests/greenio_test.py
-tests/greenpipe_test_with_statement.py
-tests/greenpool_test.py
-tests/greenthread_test.py
-tests/hub_test.py
-tests/hub_test_fork.py
-tests/mock.py
-tests/mysqldb_test.py
-tests/mysqldb_test_monkey_patch.py
-tests/nosewrapper.py
-tests/parse_results.py
-tests/patcher_psycopg_test.py
-tests/patcher_test.py
-tests/patcher_test_importlib_lock.py
-tests/pools_test.py
-tests/queue_test.py
-tests/semaphore_test.py
-tests/socket_test.py
-tests/ssl_test.py
-tests/subprocess_test.py
-tests/test__event.py
-tests/test__greenness.py
-tests/test__refcount.py
-tests/test__socket_errors.py
-tests/test_server.crt
-tests/test_server.key
-tests/thread_test.py
-tests/timeout_test.py
-tests/timeout_test_with_statement.py
-tests/timer_test.py
-tests/tpool_test.py
-tests/websocket_new_test.py
-tests/websocket_test.py
-tests/wsgi_test.py
-tests/wsgi_test_conntimeout.py
-tests/zmq_test.py
-tests/manual/__init__.py
-tests/manual/greenio_memtest.py
-tests/stdlib/all.py
-tests/stdlib/all_modules.py
-tests/stdlib/all_monkey.py
-tests/stdlib/test_SimpleHTTPServer.py
-tests/stdlib/test_asynchat.py
-tests/stdlib/test_asyncore.py
-tests/stdlib/test_ftplib.py
-tests/stdlib/test_httplib.py
-tests/stdlib/test_httpservers.py
-tests/stdlib/test_os.py
-tests/stdlib/test_queue.py
-tests/stdlib/test_select.py
-tests/stdlib/test_socket.py
-tests/stdlib/test_socket_ssl.py
-tests/stdlib/test_socketserver.py
-tests/stdlib/test_ssl.py
-tests/stdlib/test_subprocess.py
-tests/stdlib/test_thread.py
-tests/stdlib/test_thread__boundedsem.py
-tests/stdlib/test_threading.py
-tests/stdlib/test_threading_local.py
-tests/stdlib/test_timeout.py
-tests/stdlib/test_urllib.py
-tests/stdlib/test_urllib2.py
-tests/stdlib/test_urllib2_localnet.py
\ No newline at end of file
diff --git a/eventlet/eventlet.egg-info/dependency_links.txt b/eventlet/eventlet.egg-info/dependency_links.txt
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/eventlet/eventlet.egg-info/not-zip-safe b/eventlet/eventlet.egg-info/not-zip-safe
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/eventlet/eventlet.egg-info/requires.txt b/eventlet/eventlet.egg-info/requires.txt
deleted file mode 100644 (file)
index 9ca9a36..0000000
+++ /dev/null
@@ -1 +0,0 @@
-greenlet >= 0.3
\ No newline at end of file
diff --git a/eventlet/eventlet.egg-info/top_level.txt b/eventlet/eventlet.egg-info/top_level.txt
deleted file mode 100644 (file)
index b806a57..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-tests
-eventlet
diff --git a/eventlet/eventlet/green/_socket_nodns.py b/eventlet/eventlet/green/_socket_nodns.py
deleted file mode 100644 (file)
index 373c140..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-__socket = __import__('socket')
-
-__all__ = __socket.__all__
-__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket']
-
-from eventlet.patcher import slurp_properties
-slurp_properties(__socket, globals(),
-                 ignore=__patched__, srckeys=dir(__socket))
-
-os = __import__('os')
-import sys
-from eventlet.hubs import get_hub
-from eventlet.greenio import GreenSocket as socket
-from eventlet.greenio import SSL as _SSL  # for exceptions
-from eventlet.greenio import _GLOBAL_DEFAULT_TIMEOUT
-from eventlet.greenio import _fileobject
-
-try:
-    __original_fromfd__ = __socket.fromfd
-
-    def fromfd(*args):
-        return socket(__original_fromfd__(*args))
-except AttributeError:
-    pass
-
-try:
-    __original_socketpair__ = __socket.socketpair
-
-    def socketpair(*args):
-        one, two = __original_socketpair__(*args)
-        return socket(one), socket(two)
-except AttributeError:
-    pass
-
-
-def _convert_to_sslerror(ex):
-    """ Transliterates SSL.SysCallErrors to socket.sslerrors"""
-    return sslerror((ex.args[0], ex.args[1]))
-
-
-class GreenSSLObject(object):
-    """ Wrapper object around the SSLObjects returned by socket.ssl, which have a
-    slightly different interface from SSL.Connection objects. """
-
-    def __init__(self, green_ssl_obj):
-        """ Should only be called by a 'green' socket.ssl """
-        self.connection = green_ssl_obj
-        try:
-            # if it's already connected, do the handshake
-            self.connection.getpeername()
-        except:
-            pass
-        else:
-            try:
-                self.connection.do_handshake()
-            except _SSL.SysCallError as e:
-                raise _convert_to_sslerror(e)
-
-    def read(self, n=1024):
-        """If n is provided, read n bytes from the SSL connection, otherwise read
-        until EOF. The return value is a string of the bytes read."""
-        try:
-            return self.connection.read(n)
-        except _SSL.ZeroReturnError:
-            return ''
-        except _SSL.SysCallError as e:
-            raise _convert_to_sslerror(e)
-
-    def write(self, s):
-        """Writes the string s to the on the object's SSL connection.
-        The return value is the number of bytes written. """
-        try:
-            return self.connection.write(s)
-        except _SSL.SysCallError as e:
-            raise _convert_to_sslerror(e)
-
-    def server(self):
-        """ Returns a string describing the server's certificate. Useful for debugging
-        purposes; do not parse the content of this string because its format can't be
-        parsed unambiguously. """
-        return str(self.connection.get_peer_certificate().get_subject())
-
-    def issuer(self):
-        """Returns a string describing the issuer of the server's certificate. Useful
-        for debugging purposes; do not parse the content of this string because its
-        format can't be parsed unambiguously."""
-        return str(self.connection.get_peer_certificate().get_issuer())
diff --git a/eventlet/eventlet/green/urllib.py b/eventlet/eventlet/green/urllib.py
deleted file mode 100644 (file)
index f5c8f13..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-from eventlet import patcher
-from eventlet.green import socket
-from eventlet.green import time
-from eventlet.green import httplib
-from eventlet.green import ftplib
-
-to_patch = [('socket', socket), ('httplib', httplib),
-            ('time', time), ('ftplib', ftplib)]
-try:
-    from eventlet.green import ssl
-    to_patch.append(('ssl', ssl))
-except ImportError:
-    pass
-
-patcher.inject('urllib', globals(), *to_patch)
-try:
-    URLopener
-except NameError:
-    patcher.inject('urllib.request', globals(), *to_patch)
-
-
-# patch a bunch of things that have imports inside the
-# function body; this is lame and hacky but I don't feel
-# too bad because urllib is a hacky pile of junk that no
-# one should be using anyhow
-URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib))
-if hasattr(URLopener, 'open_https'):
-    URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib))
-
-URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib))
-ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib))
-ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib))
-
-del patcher
-
-# Run test program when run as a script
-if __name__ == '__main__':
-    main()
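
The deleted green/urllib.py above illustrates eventlet's patcher-based approach: a private copy of a stdlib module is imported with its blocking dependencies swapped for green equivalents, and a few functions with in-body imports are patched individually. A rough sketch of the same idea through the public import_patched() helper follows; the exact substitutions shown are illustrative only:

    from eventlet import patcher
    from eventlet.green import socket, time  # non-blocking stand-ins

    # Import a private, patched copy of the stdlib module; the keyword
    # arguments name the modules to substitute inside that copy.
    green_urllib = patcher.import_patched('urllib', socket=socket, time=time)

Callers can then use green_urllib in place of the stdlib urllib without monkey-patching the interpreter globally.
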
diff --git a/eventlet/eventlet/support/greendns.py b/eventlet/eventlet/support/greendns.py
deleted file mode 100644 (file)
index c357866..0000000
+++ /dev/null
@@ -1,480 +0,0 @@
-#!/usr/bin/env python
-'''
-    greendns - non-blocking DNS support for Eventlet
-'''
-
-# Portions of this code taken from the gogreen project:
-#   http://github.com/slideinc/gogreen
-#
-# Copyright (c) 2005-2010 Slide, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of the author nor the names of other
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import struct
-import sys
-
-from eventlet import patcher
-from eventlet.green import _socket_nodns
-from eventlet.green import time
-from eventlet.green import select
-
-dns = patcher.import_patched('dns',
-                             socket=_socket_nodns,
-                             time=time,
-                             select=select)
-for pkg in ('dns.query', 'dns.exception', 'dns.inet', 'dns.message',
-            'dns.rdatatype', 'dns.resolver', 'dns.reversename'):
-    setattr(dns, pkg.split('.')[1], patcher.import_patched(
-        pkg,
-        socket=_socket_nodns,
-        time=time,
-        select=select))
-
-socket = _socket_nodns
-
-DNS_QUERY_TIMEOUT = 10.0
-
-
-#
-# Resolver instance used to perfrom DNS lookups.
-#
-class FakeAnswer(list):
-    expiration = 0
-
-
-class FakeRecord(object):
-    pass
-
-
-class ResolverProxy(object):
-    def __init__(self, *args, **kwargs):
-        self._resolver = None
-        self._filename = kwargs.get('filename', '/etc/resolv.conf')
-        self._hosts = {}
-        if kwargs.pop('dev', False):
-            self._load_etc_hosts()
-
-    def _load_etc_hosts(self):
-        try:
-            fd = open('/etc/hosts', 'r')
-            contents = fd.read()
-            fd.close()
-        except (IOError, OSError):
-            return
-        contents = [line for line in contents.split('\n') if line and not line[0] == '#']
-        for line in contents:
-            line = line.replace('\t', ' ')
-            parts = line.split(' ')
-            parts = [p for p in parts if p]
-            if not len(parts):
-                continue
-            ip = parts[0]
-            for part in parts[1:]:
-                self._hosts[part] = ip
-
-    def clear(self):
-        self._resolver = None
-
-    def query(self, *args, **kwargs):
-        if self._resolver is None:
-            self._resolver = dns.resolver.Resolver(filename=self._filename)
-            self._resolver.cache = dns.resolver.Cache()
-
-        query = args[0]
-        if query is None:
-            args = list(args)
-            query = args[0] = '0.0.0.0'
-        if self._hosts and self._hosts.get(query):
-            answer = FakeAnswer()
-            record = FakeRecord()
-            setattr(record, 'address', self._hosts[query])
-            answer.append(record)
-            return answer
-        return self._resolver.query(*args, **kwargs)
-#
-# cache
-#
-resolver = ResolverProxy(dev=True)
-
-
-def resolve(name):
-    error = None
-    rrset = None
-
-    if rrset is None or time.time() > rrset.expiration:
-        try:
-            rrset = resolver.query(name)
-        except dns.exception.Timeout:
-            error = (socket.EAI_AGAIN, 'Lookup timed out')
-        except dns.exception.DNSException:
-            error = (socket.EAI_NODATA, 'No address associated with hostname')
-        else:
-            pass
-            # responses.insert(name, rrset)
-
-    if error:
-        if rrset is None:
-            raise socket.gaierror(error)
-        else:
-            sys.stderr.write('DNS error: %r %r\n' % (name, error))
-    return rrset
-
-
-#
-# methods
-#
-def getaliases(host):
-    """Checks for aliases of the given hostname (cname records)
-    returns a list of alias targets
-    will return an empty list if no aliases
-    """
-    cnames = []
-    error = None
-
-    try:
-        answers = dns.resolver.query(host, 'cname')
-    except dns.exception.Timeout:
-        error = (socket.EAI_AGAIN, 'Lookup timed out')
-    except dns.exception.DNSException:
-        error = (socket.EAI_NODATA, 'No address associated with hostname')
-    else:
-        for record in answers:
-            cnames.append(str(answers[0].target))
-
-    if error:
-        sys.stderr.write('DNS error: %r %r\n' % (host, error))
-
-    return cnames
-
-
-def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
-    """Replacement for Python's socket.getaddrinfo.
-
-    Currently only supports IPv4.  At present, flags are not
-    implemented.
-    """
-    socktype = socktype or socket.SOCK_STREAM
-
-    if is_ipv4_addr(host):
-        return [(socket.AF_INET, socktype, proto, '', (host, port))]
-
-    rrset = resolve(host)
-    value = []
-
-    for rr in rrset:
-        value.append((socket.AF_INET, socktype, proto, '', (rr.address, port)))
-    return value
-
-
-def gethostbyname(hostname):
-    """Replacement for Python's socket.gethostbyname.
-
-    Currently only supports IPv4.
-    """
-    if is_ipv4_addr(hostname):
-        return hostname
-
-    rrset = resolve(hostname)
-    return rrset[0].address
-
-
-def gethostbyname_ex(hostname):
-    """Replacement for Python's socket.gethostbyname_ex.
-
-    Currently only supports IPv4.
-    """
-    if is_ipv4_addr(hostname):
-        return (hostname, [], [hostname])
-
-    rrset = resolve(hostname)
-    addrs = []
-
-    for rr in rrset:
-        addrs.append(rr.address)
-    return (hostname, [], addrs)
-
-
-def getnameinfo(sockaddr, flags):
-    """Replacement for Python's socket.getnameinfo.
-
-    Currently only supports IPv4.
-    """
-    try:
-        host, port = sockaddr
-    except (ValueError, TypeError):
-        if not isinstance(sockaddr, tuple):
-            del sockaddr  # to pass a stdlib test that is
-            # hyper-careful about reference counts
-            raise TypeError('getnameinfo() argument 1 must be a tuple')
-        else:
-            # must be ipv6 sockaddr, pretending we don't know how to resolve it
-            raise socket.gaierror(-2, 'name or service not known')
-
-    if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST):
-        # Conflicting flags.  Punt.
-        raise socket.gaierror(
-            (socket.EAI_NONAME, 'Name or service not known'))
-
-    if is_ipv4_addr(host):
-        try:
-            rrset = resolver.query(
-                dns.reversename.from_address(host), dns.rdatatype.PTR)
-            if len(rrset) > 1:
-                raise socket.error('sockaddr resolved to multiple addresses')
-            host = rrset[0].target.to_text(omit_final_dot=True)
-        except dns.exception.Timeout:
-            if flags & socket.NI_NAMEREQD:
-                raise socket.gaierror((socket.EAI_AGAIN, 'Lookup timed out'))
-        except dns.exception.DNSException:
-            if flags & socket.NI_NAMEREQD:
-                raise socket.gaierror(
-                    (socket.EAI_NONAME, 'Name or service not known'))
-    else:
-        try:
-            rrset = resolver.query(host)
-            if len(rrset) > 1:
-                raise socket.error('sockaddr resolved to multiple addresses')
-            if flags & socket.NI_NUMERICHOST:
-                host = rrset[0].address
-        except dns.exception.Timeout:
-            raise socket.gaierror((socket.EAI_AGAIN, 'Lookup timed out'))
-        except dns.exception.DNSException:
-            raise socket.gaierror(
-                (socket.EAI_NODATA, 'No address associated with hostname'))
-
-    if not (flags & socket.NI_NUMERICSERV):
-        proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp'
-        port = socket.getservbyport(port, proto)
-
-    return (host, port)
-
-
-def is_ipv4_addr(host):
-    """is_ipv4_addr returns true if host is a valid IPv4 address in
-    dotted quad notation.
-    """
-    try:
-        d1, d2, d3, d4 = map(int, host.split('.'))
-    except (ValueError, AttributeError):
-        return False
-
-    if 0 <= d1 <= 255 and 0 <= d2 <= 255 and 0 <= d3 <= 255 and 0 <= d4 <= 255:
-        return True
-    return False
-
-
-def _net_read(sock, count, expiration):
-    """coro friendly replacement for dns.query._net_write
-    Read the specified number of bytes from sock.  Keep trying until we
-    either get the desired amount, or we hit EOF.
-    A Timeout exception will be raised if the operation is not completed
-    by the expiration time.
-    """
-    s = ''
-    while count > 0:
-        try:
-            n = sock.recv(count)
-        except socket.timeout:
-            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
-            if expiration - time.time() <= 0.0:
-                raise dns.exception.Timeout
-        if n == '':
-            raise EOFError
-        count = count - len(n)
-        s = s + n
-    return s
-
-
-def _net_write(sock, data, expiration):
-    """coro friendly replacement for dns.query._net_write
-    Write the specified data to the socket.
-    A Timeout exception will be raised if the operation is not completed
-    by the expiration time.
-    """
-    current = 0
-    l = len(data)
-    while current < l:
-        try:
-            current += sock.send(data[current:])
-        except socket.timeout:
-            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
-            if expiration - time.time() <= 0.0:
-                raise dns.exception.Timeout
-
-
-def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53, af=None, source=None,
-        source_port=0, ignore_unexpected=False):
-    """coro friendly replacement for dns.query.udp
-    Return the response obtained after sending a query via UDP.
-
-    @param q: the query
-    @type q: dns.message.Message
-    @param where: where to send the message
-    @type where: string containing an IPv4 or IPv6 address
-    @param timeout: The number of seconds to wait before the query times out.
-    If None, the default, wait forever.
-    @type timeout: float
-    @param port: The port to which to send the message.  The default is 53.
-    @type port: int
-    @param af: the address family to use.  The default is None, which
-    causes the address family to use to be inferred from the form of of where.
-    If the inference attempt fails, AF_INET is used.
-    @type af: int
-    @rtype: dns.message.Message object
-    @param source: source address.  The default is the IPv4 wildcard address.
-    @type source: string
-    @param source_port: The port from which to send the message.
-    The default is 0.
-    @type source_port: int
-    @param ignore_unexpected: If True, ignore responses from unexpected
-    sources.  The default is False.
-    @type ignore_unexpected: bool"""
-
-    wire = q.to_wire()
-    if af is None:
-        try:
-            af = dns.inet.af_for_address(where)
-        except:
-            af = dns.inet.AF_INET
-    if af == dns.inet.AF_INET:
-        destination = (where, port)
-        if source is not None:
-            source = (source, source_port)
-    elif af == dns.inet.AF_INET6:
-        destination = (where, port, 0, 0)
-        if source is not None:
-            source = (source, source_port, 0, 0)
-
-    s = socket.socket(af, socket.SOCK_DGRAM)
-    s.settimeout(timeout)
-    try:
-        expiration = dns.query._compute_expiration(timeout)
-        if source is not None:
-            s.bind(source)
-        try:
-            s.sendto(wire, destination)
-        except socket.timeout:
-            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
-            if expiration - time.time() <= 0.0:
-                raise dns.exception.Timeout
-        while 1:
-            try:
-                (wire, from_address) = s.recvfrom(65535)
-            except socket.timeout:
-                # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
-                if expiration - time.time() <= 0.0:
-                    raise dns.exception.Timeout
-            if from_address == destination:
-                break
-            if not ignore_unexpected:
-                raise dns.query.UnexpectedSource(
-                    'got a response from %s instead of %s'
-                    % (from_address, destination))
-    finally:
-        s.close()
-
-    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac)
-    if not q.is_response(r):
-        raise dns.query.BadResponse()
-    return r
-
-
-def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
-        af=None, source=None, source_port=0):
-    """coro friendly replacement for dns.query.tcp
-    Return the response obtained after sending a query via TCP.
-
-    @param q: the query
-    @type q: dns.message.Message object
-    @param where: where to send the message
-    @type where: string containing an IPv4 or IPv6 address
-    @param timeout: The number of seconds to wait before the query times out.
-    If None, the default, wait forever.
-    @type timeout: float
-    @param port: The port to which to send the message.  The default is 53.
-    @type port: int
-    @param af: the address family to use.  The default is None, which
-    causes the address family to use to be inferred from the form of of where.
-    If the inference attempt fails, AF_INET is used.
-    @type af: int
-    @rtype: dns.message.Message object
-    @param source: source address.  The default is the IPv4 wildcard address.
-    @type source: string
-    @param source_port: The port from which to send the message.
-    The default is 0.
-    @type source_port: int"""
-
-    wire = q.to_wire()
-    if af is None:
-        try:
-            af = dns.inet.af_for_address(where)
-        except:
-            af = dns.inet.AF_INET
-    if af == dns.inet.AF_INET:
-        destination = (where, port)
-        if source is not None:
-            source = (source, source_port)
-    elif af == dns.inet.AF_INET6:
-        destination = (where, port, 0, 0)
-        if source is not None:
-            source = (source, source_port, 0, 0)
-    s = socket.socket(af, socket.SOCK_STREAM)
-    s.settimeout(timeout)
-    try:
-        expiration = dns.query._compute_expiration(timeout)
-        if source is not None:
-            s.bind(source)
-        try:
-            s.connect(destination)
-        except socket.timeout:
-            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
-            if expiration - time.time() <= 0.0:
-                raise dns.exception.Timeout
-
-        l = len(wire)
-        # copying the wire into tcpmsg is inefficient, but lets us
-        # avoid writev() or doing a short write that would get pushed
-        # onto the net
-        tcpmsg = struct.pack("!H", l) + wire
-        _net_write(s, tcpmsg, expiration)
-        ldata = _net_read(s, 2, expiration)
-        (l,) = struct.unpack("!H", ldata)
-        wire = _net_read(s, l, expiration)
-    finally:
-        s.close()
-    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac)
-    if not q.is_response(r):
-        raise dns.query.BadResponse()
-    return r
-
-
-def reset():
-    resolver.clear()
-
-# Install our coro-friendly replacements for the tcp and udp query methods.
-dns.query.tcp = tcp
-dns.query.udp = udp
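
The greendns module removed above wraps dnspython to provide coroutine-friendly stand-ins for the stdlib resolver calls (getaddrinfo, gethostbyname, gethostbyname_ex, getnameinfo, plus the tcp/udp query helpers). A rough usage sketch, assuming dnspython is installed and that 'localhost' resolves via /etc/hosts:

    from eventlet.support import greendns

    # Drop-in (IPv4-only) replacements for the socket module's resolvers.
    print(greendns.gethostbyname('localhost'))       # e.g. '127.0.0.1'
    print(greendns.getaddrinfo('localhost', 80)[0])  # -> (AF_INET, SOCK_STREAM, 0, '', (addr, 80))
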
diff --git a/eventlet/eventlet/support/greenlets.py b/eventlet/eventlet/support/greenlets.py
deleted file mode 100644 (file)
index 6f3b9bc..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-import distutils.version
-
-try:
-    import greenlet
-    getcurrent = greenlet.greenlet.getcurrent
-    GreenletExit = greenlet.greenlet.GreenletExit
-    preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__)
-                         >= distutils.version.LooseVersion('0.3.2'))
-    greenlet = greenlet.greenlet
-except ImportError as e:
-    raise
-    try:
-        from py.magic import greenlet
-        getcurrent = greenlet.getcurrent
-        GreenletExit = greenlet.GreenletExit
-        preserves_excinfo = False
-    except ImportError:
-        try:
-            from stackless import greenlet
-            getcurrent = greenlet.getcurrent
-            GreenletExit = greenlet.GreenletExit
-            preserves_excinfo = False
-        except ImportError:
-            try:
-                from support.stacklesss import greenlet, getcurrent, GreenletExit
-                preserves_excinfo = False
-                (greenlet, getcurrent, GreenletExit)  # silence pyflakes
-            except ImportError as e:
-                raise ImportError("Unable to find an implementation of greenlet.")
diff --git a/eventlet/setup.cfg b/eventlet/setup.cfg
deleted file mode 100644 (file)
index dbd54fd..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-[wheel]
-universal = True
-
-[egg_info]
-tag_build = 
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/eventlet/tests/greendns_test.py b/eventlet/tests/greendns_test.py
deleted file mode 100644 (file)
index 7a64b1e..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-from nose.plugins.skip import SkipTest
-
-
-def test_greendns_getnameinfo_resolve_port():
-    try:
-        from eventlet.support import greendns
-    except ImportError:
-        raise SkipTest('greendns requires package dnspython')
-
-    # https://bitbucket.org/eventlet/eventlet/issue/152
-    _, port1 = greendns.getnameinfo(('127.0.0.1', 80), 0)
-    _, port2 = greendns.getnameinfo(('localhost', 80), 0)
-    assert port1 == port2 == 'http'
diff --git a/eventlet/tests/mysqldb_test_monkey_patch.py b/eventlet/tests/mysqldb_test_monkey_patch.py
deleted file mode 100644 (file)
index a76178e..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-from __future__ import print_function
-from eventlet import patcher
-
-# no standard tests in this file, ignore
-__test__ = False
-
-if __name__ == '__main__':
-    import MySQLdb as m
-    from eventlet.green import MySQLdb as gm
-    patcher.monkey_patch(all=True, MySQLdb=True)
-    print("mysqltest {0}".format(",".join(sorted(patcher.already_patched.keys()))))
-    print("connect {0}".format(m.connect == gm.connect))
similarity index 88%
rename from eventlet/AUTHORS
rename to python-eventlet/AUTHORS
index df79f4874c720d986800465bd5c381561248ccc1..e0ab0e2aa946225c1ef09fb66cc3a92748b061db 100644 (file)
@@ -37,6 +37,8 @@ Contributors
 * Jakub Stasiak
 * Aldona Majorek
 * Victor Sergeyev
+* David Szotten
+* Victor Stinner
 
 Linden Lab Contributors
 -----------------------
@@ -96,7 +98,7 @@ Thanks To
 * Astrum Kuo, python3 compatibility fixes; greenthread.unlink() method
 * Davanum Srinivas, Python3 compatibility fixes
 * Dmitriy Kruglyak, PyPy 2.3 compatibility fix
-* Jan Grant, Michael Kerrin, second simultaneous read (Github #94)
+* Jan Grant, Michael Kerrin, second simultaneous read (GH-94)
 * Simon Jagoe, Python3 octal literal fix
 * Tushar Gohad, wsgi: Support optional headers w/ "100 Continue" responses
 * raylu, fixing operator precedence bug in eventlet.wsgi
@@ -110,4 +112,10 @@ Thanks To
 * Steven Hardy
 * Stuart McLaren
 * Tomaz Muraus
-* Victor Stinner
+* ChangBo Guo(gcb), fixing typos in the documentation (GH-194)
+* Marc Abramowitz, fixing the README so it renders correctly on PyPI (GH-183)
+* Shaun Stanworth, equal chance to acquire semaphore from different greenthreads (GH-136)
+* Lior Neudorfer, Make sure SSL retries are done using the exact same data buffer
+* Sean Dague, wsgi: Provide python logging compatibility
+* Tim Simmons, Use _socket_nodns and select in dnspython support
+* Antonio Cuni, fix fd double close on PyPy
similarity index 100%
rename from eventlet/LICENSE
rename to python-eventlet/LICENSE
similarity index 95%
rename from eventlet/NEWS
rename to python-eventlet/NEWS
index 47767c315d01b6c5923d9de8ac073d59f8f51aa0..4e8df12bad1d475aafd070acf796231ee4e1160b 100644 (file)
@@ -1,11 +1,40 @@
-0.16.1
+0.17.4
+======
+* ssl: incorrect initalization of default context; Thanks to stuart-mclaren
+
+0.17.3
+======
+* green.thread: Python3.3+ fixes; Thanks to Victor Stinner
+* Semaphore.acquire() accepts timeout=-1; Thanks to Victor Stinner
+
+0.17.2
+======
+* wsgi: Provide python logging compatibility; Thanks to Sean Dague
+* greendns: fix premature connection closing in DNS proxy; Thanks to Tim Simmons
+* greenio: correct fd close; Thanks to Antonio Cuni and Victor Sergeyev
+* green.ssl: HTTPS client Python 2.7.9+ compatibility
+* setup: tests.{isolated,manual} polluted top-level packages
+
+0.17.1
 ======
+* greendns: fix dns.name import and Python3 compatibility
+
+0.17
+====
+* Full Python3 compatibility; Thanks to Jakub Stasiak
+* greendns: IPv6 support, improved handling of /etc/hosts; Thanks to Floris Bruynooghe
+* tpool: make sure we return results during killall; Thanks to David Szotten
+* semaphore: Don't hog a semaphore if someone else is waiting for it; Thanks to Shaun Stanworth
+* green.socket: create_connection() was wrapping all exceptions in socket.error; Thanks to Donagh McCabe
+* Make sure SSL retries are done using the exact same data buffer; Thanks to Lior Neudorfer
+* greenio: shutdown already closed sockets without error; Thanks to David Szotten
 
+0.16.1
+======
 * Wheel build 0.16.0 incorrectly shipped removed module eventlet.util.
 
 0.16.0
 ======
-
 * Fix SSL socket wrapping and Python 2.7.9 compatibility; Thanks to Jakub Stasiak
 * Fix monkey_patch() on Python 3; Thanks to Victor Stinner
 * Fix "maximum recursion depth exceeded in GreenSocket.__del__"; Thanks to Jakub Stasiak
similarity index 94%
rename from eventlet/README.rst
rename to python-eventlet/README.rst
index ce554ed620d15815197461e7ddf1fe618e5df13d..07da6658b95a0e503b0748b7142847d61d403d5a 100644 (file)
@@ -59,6 +59,7 @@ now it is not supported, so with apologies for any inconvenience we discontinue
 If you have a project that uses Eventlet with Twisted, your options are:
 
 * use last working release eventlet==0.14
-* start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13,
-`EVENTLET_HUB` environment variable can point to external modules.
+* start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13, `EVENTLET_HUB` environment variable can point to external modules.
 * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project.
+
+Apologies for any inconvenience.
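
The rewrapped bullet above notes that, since eventlet 0.13, the EVENTLET_HUB environment variable can name an external hub module. A minimal sketch of selecting a custom hub that way; the module path is hypothetical, and the variable is only consulted when the hub is first created:

    import os

    # Hypothetical external hub module implementing the hub interface.
    os.environ['EVENTLET_HUB'] = 'mypackage.twisted_hub'

    import eventlet
    eventlet.sleep(0)  # first hub use reads EVENTLET_HUB
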
diff --git a/python-eventlet/benchmarks/__init__.py b/python-eventlet/benchmarks/__init__.py
new file mode 100644 (file)
index 0000000..0e3c338
--- /dev/null
@@ -0,0 +1,26 @@
+import gc
+import timeit
+import random
+
+from eventlet.support import six
+
+
+def measure_best(repeat, iters,
+                 common_setup='pass',
+                 common_cleanup='pass',
+                 *funcs):
+    funcs = list(funcs)
+    results = dict([(f, []) for f in funcs])
+
+    for i in six.moves.range(repeat):
+        random.shuffle(funcs)
+        for func in funcs:
+            gc.collect()
+            t = timeit.Timer(func, setup=common_setup)
+            results[func].append(t.timeit(iters))
+            common_cleanup()
+
+    best_results = {}
+    for func, times in six.iteritems(results):
+        best_results[func] = min(times)
+    return best_results
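
measure_best(), added above, shuffles the candidate callables, times each one iters times per run, and keeps the best of repeat runs per callable. A small usage sketch (the noop function is purely illustrative); note that common_setup is a timeit setup string while common_cleanup must be callable, which is how the sibling benchmark scripts below invoke it:

    import benchmarks


    def noop():
        pass

    # Best of 3 repeats, 1000 iterations each; prints the fastest run time.
    best = benchmarks.measure_best(3, 1000, 'pass', lambda: None, noop)
    print(best[noop])
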
diff --git a/python-eventlet/benchmarks/context.py b/python-eventlet/benchmarks/context.py
new file mode 100644 (file)
index 0000000..d9b564d
--- /dev/null
@@ -0,0 +1,97 @@
+"""Test context switching performance of threading and eventlet"""
+from __future__ import print_function
+
+import threading
+import time
+
+import eventlet
+from eventlet import hubs
+from eventlet.hubs import pyevent, epolls, poll, selects
+
+
+CONTEXT_SWITCHES = 100000
+
+
+def run(event, wait_event):
+    counter = 0
+    while counter <= CONTEXT_SWITCHES:
+        wait_event.wait()
+        wait_event.reset()
+        counter += 1
+        event.send()
+
+
+def test_eventlet():
+    event1 = eventlet.event.Event()
+    event2 = eventlet.event.Event()
+    event1.send()
+    thread1 = eventlet.spawn(run, event1, event2)
+    thread2 = eventlet.spawn(run, event2, event1)
+
+    thread1.wait()
+    thread2.wait()
+
+
+class BenchThread(threading.Thread):
+    def __init__(self, event, wait_event):
+        threading.Thread.__init__(self)
+        self.counter = 0
+        self.event = event
+        self.wait_event = wait_event
+
+    def run(self):
+        while self.counter <= CONTEXT_SWITCHES:
+            self.wait_event.wait()
+            self.wait_event.clear()
+            self.counter += 1
+            self.event.set()
+
+
+def test_thread():
+    event1 = threading.Event()
+    event2 = threading.Event()
+    event1.set()
+    thread1 = BenchThread(event1, event2)
+    thread2 = BenchThread(event2, event1)
+    thread1.start()
+    thread2.start()
+    thread1.join()
+    thread2.join()
+
+
+print("Testing with %d context switches" % CONTEXT_SWITCHES)
+start = time.time()
+test_thread()
+print("threading: %.02f seconds" % (time.time() - start))
+
+try:
+    hubs.use_hub(pyevent)
+    start = time.time()
+    test_eventlet()
+    print("pyevent:   %.02f seconds" % (time.time() - start))
+except:
+    print("pyevent hub unavailable")
+
+try:
+    hubs.use_hub(epolls)
+    start = time.time()
+    test_eventlet()
+    print("epoll:     %.02f seconds" % (time.time() - start))
+except:
+    print("epoll hub unavailable")
+
+try:
+    hubs.use_hub(poll)
+    start = time.time()
+    test_eventlet()
+    print("poll:      %.02f seconds" % (time.time() - start))
+except:
+    print("poll hub unavailable")
+
+try:
+    hubs.use_hub(selects)
+    start = time.time()
+    test_eventlet()
+    print("select:    %.02f seconds" % (time.time() - start))
+except:
+    print("select hub unavailable")
diff --git a/python-eventlet/benchmarks/hub_timers.py b/python-eventlet/benchmarks/hub_timers.py
new file mode 100644 (file)
index 0000000..f5a2b81
--- /dev/null
@@ -0,0 +1,46 @@
+#! /usr/bin/env python
+from __future__ import print_function
+
+# test timer adds & expires on hubs.hub.BaseHub
+
+import sys
+import eventlet
+import random
+import time
+
+from eventlet.hubs import timer, get_hub
+from eventlet.support import six
+
+
+timer_count = 100000
+
+if len(sys.argv) >= 2:
+    timer_count = int(sys.argv[1])
+
+l = []
+
+
+def work(n):
+    l.append(n)
+
+timeouts = [random.uniform(0, 10) for x in six.moves.range(timer_count)]
+
+hub = get_hub()
+
+start = time.time()
+
+scheduled = []
+
+for timeout in timeouts:
+    t = timer.Timer(timeout, work, timeout)
+    t.schedule()
+
+    scheduled.append(t)
+
+hub.prepare_timers()
+hub.fire_timers(time.time() + 11)
+hub.prepare_timers()
+
+end = time.time()
+
+print("Duration: %f" % (end - start,))
diff --git a/python-eventlet/benchmarks/localhost_socket.py b/python-eventlet/benchmarks/localhost_socket.py
new file mode 100644 (file)
index 0000000..2c1a658
--- /dev/null
@@ -0,0 +1,117 @@
+"""Benchmark evaluating eventlet's performance at speaking to itself over a localhost socket."""
+from __future__ import print_function
+
+import time
+
+import benchmarks
+from eventlet.support import six
+
+
+BYTES = 1000
+SIZE = 1
+CONCURRENCY = 50
+TRIES = 5
+
+
+def reader(sock):
+    expect = BYTES
+    while expect > 0:
+        d = sock.recv(min(expect, SIZE))
+        expect -= len(d)
+
+
+def writer(addr, socket_impl):
+    sock = socket_impl(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect(addr)
+    sent = 0
+    while sent < BYTES:
+        d = 'xy' * (max(min(SIZE / 2, BYTES - sent), 1))
+        sock.sendall(d)
+        sent += len(d)
+
+
+def green_accepter(server_sock, pool):
+    for i in six.moves.range(CONCURRENCY):
+        sock, addr = server_sock.accept()
+        pool.spawn_n(reader, sock)
+
+
+def heavy_accepter(server_sock, pool):
+    for i in six.moves.range(CONCURRENCY):
+        sock, addr = server_sock.accept()
+        t = threading.Thread(None, reader, "reader thread", (sock,))
+        t.start()
+        pool.append(t)
+
+
+import eventlet.green.socket
+import eventlet
+
+from eventlet import debug
+debug.hub_exceptions(True)
+
+
+def launch_green_threads():
+    pool = eventlet.GreenPool(CONCURRENCY * 2 + 1)
+    server_sock = eventlet.green.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    server_sock.bind(('localhost', 0))
+    server_sock.listen(50)
+    addr = ('localhost', server_sock.getsockname()[1])
+    pool.spawn_n(green_accepter, server_sock, pool)
+    for i in six.moves.range(CONCURRENCY):
+        pool.spawn_n(writer, addr, eventlet.green.socket.socket)
+    pool.waitall()
+
+
+import threading
+import socket
+
+
+def launch_heavy_threads():
+    threads = []
+    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    server_sock.bind(('localhost', 0))
+    server_sock.listen(50)
+    addr = ('localhost', server_sock.getsockname()[1])
+    accepter_thread = threading.Thread(
+        None, heavy_accepter, "accepter thread", (server_sock, threads))
+    accepter_thread.start()
+    threads.append(accepter_thread)
+    for i in six.moves.range(CONCURRENCY):
+        client_thread = threading.Thread(None, writer, "writer thread", (addr, socket.socket))
+        client_thread.start()
+        threads.append(client_thread)
+    for t in threads:
+        t.join()
+
+
+if __name__ == "__main__":
+    import optparse
+    parser = optparse.OptionParser()
+    parser.add_option('--compare-threading', action='store_true', dest='threading', default=False)
+    parser.add_option('-b', '--bytes', type='int', dest='bytes',
+                      default=BYTES)
+    parser.add_option('-s', '--size', type='int', dest='size',
+                      default=SIZE)
+    parser.add_option('-c', '--concurrency', type='int', dest='concurrency',
+                      default=CONCURRENCY)
+    parser.add_option('-t', '--tries', type='int', dest='tries',
+                      default=TRIES)
+
+    opts, args = parser.parse_args()
+    BYTES = opts.bytes
+    SIZE = opts.size
+    CONCURRENCY = opts.concurrency
+    TRIES = opts.tries
+
+    funcs = [launch_green_threads]
+    if opts.threading:
+        funcs = [launch_green_threads, launch_heavy_threads]
+    results = benchmarks.measure_best(TRIES, 3,
+                                      lambda: None, lambda: None,
+                                      *funcs)
+    print("green:", results[launch_green_threads])
+    if opts.threading:
+        print("threads:", results[launch_heavy_threads])
+        print("%", (results[launch_green_threads] - results[launch_heavy_threads]
+                    ) / results[launch_heavy_threads] * 100)
diff --git a/python-eventlet/benchmarks/spawn.py b/python-eventlet/benchmarks/spawn.py
new file mode 100644 (file)
index 0000000..3e6fdde
--- /dev/null
@@ -0,0 +1,86 @@
+"""Compare spawn to spawn_n"""
+from __future__ import print_function
+
+import eventlet
+import benchmarks
+
+
+def cleanup():
+    eventlet.sleep(0.2)
+
+
+iters = 10000
+best = benchmarks.measure_best(
+    5, iters,
+    'pass',
+    cleanup,
+    eventlet.sleep)
+print("eventlet.sleep (main)", best[eventlet.sleep])
+
+gt = eventlet.spawn(
+    benchmarks.measure_best, 5, iters,
+    'pass',
+    cleanup,
+    eventlet.sleep)
+best = gt.wait()
+print("eventlet.sleep (gt)", best[eventlet.sleep])
+
+
+def dummy(i=None):
+    return i
+
+
+def run_spawn():
+    eventlet.spawn(dummy, 1)
+
+
+def run_spawn_n():
+    eventlet.spawn_n(dummy, 1)
+
+
+def run_spawn_n_kw():
+    eventlet.spawn_n(dummy, i=1)
+
+
+best = benchmarks.measure_best(
+    5, iters,
+    'pass',
+    cleanup,
+    run_spawn_n,
+    run_spawn,
+    run_spawn_n_kw)
+print("eventlet.spawn", best[run_spawn])
+print("eventlet.spawn_n", best[run_spawn_n])
+print("eventlet.spawn_n(**kw)", best[run_spawn_n_kw])
+print("%% %0.1f" % ((best[run_spawn] - best[run_spawn_n]) / best[run_spawn_n] * 100))
+
+pool = None
+
+
+def setup():
+    global pool
+    pool = eventlet.GreenPool(iters)
+
+
+def run_pool_spawn():
+    pool.spawn(dummy, 1)
+
+
+def run_pool_spawn_n():
+    pool.spawn_n(dummy, 1)
+
+
+def cleanup_pool():
+    pool.waitall()
+
+
+best = benchmarks.measure_best(
+    3, iters,
+    setup,
+    cleanup_pool,
+    run_pool_spawn,
+    run_pool_spawn_n,
+)
+print("eventlet.GreenPool.spawn", best[run_pool_spawn])
+print("eventlet.GreenPool.spawn_n", best[run_pool_spawn_n])
+print("%% %0.1f" % ((best[run_pool_spawn] - best[run_pool_spawn_n]) / best[run_pool_spawn_n] * 100))
diff --git a/python-eventlet/benchmarks/spawn_plot.py b/python-eventlet/benchmarks/spawn_plot.py
new file mode 100644 (file)
index 0000000..349b0e9
--- /dev/null
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+'''
+    Compare spawn to spawn_n, among other things.
+
+    This script will generate a number of "properties" files for the
+    Hudson plot plugin
+'''
+
+import os
+import eventlet
+import benchmarks
+
+DATA_DIR = 'plot_data'
+
+if not os.path.exists(DATA_DIR):
+    os.makedirs(DATA_DIR)
+
+
+def write_result(filename, best):
+    fd = open(os.path.join(DATA_DIR, filename), 'w')
+    fd.write('YVALUE=%s' % best)
+    fd.close()
+
+
+def cleanup():
+    eventlet.sleep(0.2)
+
+iters = 10000
+best = benchmarks.measure_best(
+    5, iters,
+    'pass',
+    cleanup,
+    eventlet.sleep)
+
+write_result('eventlet.sleep_main', best[eventlet.sleep])
+
+gt = eventlet.spawn(
+    benchmarks.measure_best, 5, iters,
+    'pass',
+    cleanup,
+    eventlet.sleep)
+best = gt.wait()
+write_result('eventlet.sleep_gt', best[eventlet.sleep])
+
+
+def dummy(i=None):
+    return i
+
+
+def run_spawn():
+    eventlet.spawn(dummy, 1)
+
+
+def run_spawn_n():
+    eventlet.spawn_n(dummy, 1)
+
+
+def run_spawn_n_kw():
+    eventlet.spawn_n(dummy, i=1)
+
+
+best = benchmarks.measure_best(
+    5, iters,
+    'pass',
+    cleanup,
+    run_spawn_n,
+    run_spawn,
+    run_spawn_n_kw)
+write_result('eventlet.spawn', best[run_spawn])
+write_result('eventlet.spawn_n', best[run_spawn_n])
+write_result('eventlet.spawn_n_kw', best[run_spawn_n_kw])
+
+pool = None
+
+
+def setup():
+    global pool
+    pool = eventlet.GreenPool(iters)
+
+
+def run_pool_spawn():
+    pool.spawn(dummy, 1)
+
+
+def run_pool_spawn_n():
+    pool.spawn_n(dummy, 1)
+
+
+def cleanup_pool():
+    pool.waitall()
+
+
+best = benchmarks.measure_best(
+    3, iters,
+    setup,
+    cleanup_pool,
+    run_pool_spawn,
+    run_pool_spawn_n,
+)
+write_result('eventlet.GreenPool.spawn', best[run_pool_spawn])
+write_result('eventlet.GreenPool.spawn_n', best[run_pool_spawn_n])
diff --git a/python-eventlet/bin/build-website.bash b/python-eventlet/bin/build-website.bash
new file mode 100755 (executable)
index 0000000..8f461e0
--- /dev/null
@@ -0,0 +1,74 @@
+#!/bin/bash
+set -e
+
+build="$PWD/website-build"
+usage="Builds eventlet.net website static pages into ${build}.
+Requires sphinx-build, git and Github account.
+
+  --no-commit        Just build HTML, skip any git operations."
+
+commit=1
+while [ -n "$1" ]; do
+    case $1 in
+    --no-commit)
+        commit=0
+        ;;
+    *)
+        echo "$usage" >&2
+        exit 1
+        ;;
+    esac
+       shift
+done
+
+if ! which sphinx-build >/dev/null; then
+       echo "sphinx-build not found. Possible solution: pip install sphinx" >&2
+       echo "Links: http://sphinx-doc.org/" >&2
+       exit 1
+fi
+
+if [ $commit -eq 1 ] && ! git status >/dev/null; then
+       echo "git not found. git and Github account are required to update online documentation." >&2
+       echo "Links: http://git-scm.com/ https://github.com/" >&2
+       exit 1
+fi
+
+echo "1. clean"
+rm -rf "$build"
+mkdir -p "$build/doc"
+
+echo "2. build static pages"
+cp doc/real_index.html "$build/index.html"
+cp NEWS doc/changelog.rst
+
+# -b html -- builder, output mode
+# -d dir  -- path to doctrees cache
+# -n      -- nit-picky mode (kind of -Wall for gcc)
+# -W      -- turns warnings into errors
+# -q      -- quiet, emit only warnings and errors
+sphinx-build -b html -d "$build/tmp" -n -q "doc" "$build/doc"
+rm -rf "$build/tmp"
+rm -f "$build/doc/.buildinfo"
+rm -f "doc/changelog.rst"
+
+if [ $commit -eq 1 ]; then
+    echo "3. Updating git branch gh-pages"
+    source_name=`git rev-parse --abbrev-ref HEAD`
+    source_id=`git rev-parse --short HEAD`
+    git branch --track gh-pages origin/gh-pages || true
+    git checkout gh-pages
+    git ls-files |grep -Ev '^.gitignore$' |xargs rm -f
+    rm -rf "doc"
+
+    mv "$build"/* ./
+    touch ".nojekyll"
+    echo "eventlet.net" >"CNAME"
+    rmdir "$build"
+
+    echo "4. Commit"
+    git add -A
+    git status
+
+    read -p "Carefully read git status output above, press Enter to continue or Ctrl+C to abort"
+    git commit --edit -m "Website built from $source_name $source_id"
+fi
diff --git a/python-eventlet/bin/release b/python-eventlet/bin/release
new file mode 100755 (executable)
index 0000000..6480e7a
--- /dev/null
@@ -0,0 +1,50 @@
+#!/bin/bash -e
+cd "$( dirname "${BASH_SOURCE[0]}" )/.."
+if [[ ! -d venv-release ]]; then
+       virtualenv venv-release
+       echo '*' >venv-release/.gitignore
+       venv-release/bin/pip install wheel sphinx
+fi
+. $PWD/venv-release/bin/activate
+pip install -e $PWD
+
+main() {
+       branch="${1-$(git symbolic-ref --short HEAD)}"
+       version="$(python -c 'import eventlet; print(eventlet.__version__)')"
+       printf "branch: %s version: '%s'\n" $branch $version >&2
+       if [[ "$branch" != "master" ]]; then
+               echo "Must be on master" >&2
+               exit 1
+       fi
+       if [[ -n "$(git status --short -uall)" ]]; then
+               echo "Tree must be clean" >&2
+               exit 1
+       fi
+       confirm "Continue? [yN] " || exit 1
+
+       if ! git tag "v$version"; then
+               echo "tag failed" >&2
+               confirm "Continue still? [yN] " || exit 1
+       fi
+
+       if confirm "Upload to PyPi? [Yn] "; then
+               rm -rf build dist
+               python setup.py sdist bdist_wheel register upload
+       fi
+
+       bin/build-website.bash
+
+       git push origin master
+       git push --tags
+       git push origin gh-pages
+}
+
+confirm() {
+       read -n1 -p "$1" reply
+       echo ""
+       rc=0
+       [[ "$reply" != "y" ]] && rc=1
+       return $rc
+}
+
+main "$@"
diff --git a/python-eventlet/doc/_templates/layout.html b/python-eventlet/doc/_templates/layout.html
new file mode 100644 (file)
index 0000000..7c2c26d
--- /dev/null
@@ -0,0 +1,14 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+  ga('create', 'UA-42952223-1', 'eventlet.net');
+  ga('send', 'pageview');
+</script>
+{% endblock %}
diff --git a/python-eventlet/doc/make.bat b/python-eventlet/doc/make.bat
new file mode 100644 (file)
index 0000000..cfc1e8e
--- /dev/null
@@ -0,0 +1,112 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+       set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+       :help
+       echo.Please use `make ^<target^>` where ^<target^> is one of
+       echo.  html      to make standalone HTML files
+       echo.  dirhtml   to make HTML files named index.html in directories
+       echo.  pickle    to make pickle files
+       echo.  json      to make JSON files
+       echo.  htmlhelp  to make HTML files and a HTML help project
+       echo.  qthelp    to make HTML files and a qthelp project
+       echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+       echo.  changes   to make an overview over all changed/added/deprecated items
+       echo.  linkcheck to check all external links for integrity
+       echo.  doctest   to run all doctests embedded in the documentation if enabled
+       goto end
+)
+
+if "%1" == "clean" (
+       for /d %%i in (_build\*) do rmdir /q /s %%i
+       del /q /s _build\*
+       goto end
+)
+
+if "%1" == "html" (
+       %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html
+       echo.
+       echo.Build finished. The HTML pages are in _build/html.
+       goto end
+)
+
+if "%1" == "dirhtml" (
+       %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml
+       echo.
+       echo.Build finished. The HTML pages are in _build/dirhtml.
+       goto end
+)
+
+if "%1" == "pickle" (
+       %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle
+       echo.
+       echo.Build finished; now you can process the pickle files.
+       goto end
+)
+
+if "%1" == "json" (
+       %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json
+       echo.
+       echo.Build finished; now you can process the JSON files.
+       goto end
+)
+
+if "%1" == "htmlhelp" (
+       %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp
+       echo.
+       echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in _build/htmlhelp.
+       goto end
+)
+
+if "%1" == "qthelp" (
+       %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% _build/qthelp
+       echo.
+       echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in _build/qthelp, like this:
+       echo.^> qcollectiongenerator _build\qthelp\Eventlet.qhcp
+       echo.To view the help file:
+       echo.^> assistant -collectionFile _build\qthelp\Eventlet.ghc
+       goto end
+)
+
+if "%1" == "latex" (
+       %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex
+       echo.
+       echo.Build finished; the LaTeX files are in _build/latex.
+       goto end
+)
+
+if "%1" == "changes" (
+       %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes
+       echo.
+       echo.The overview file is in _build/changes.
+       goto end
+)
+
+if "%1" == "linkcheck" (
+       %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck
+       echo.
+       echo.Link check complete; look for any errors in the above output ^
+or in _build/linkcheck/output.txt.
+       goto end
+)
+
+if "%1" == "doctest" (
+       %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest
+       echo.
+       echo.Testing of doctests in the sources finished, look at the ^
+results in _build/doctest/output.txt.
+       goto end
+)
+
+:end
diff --git a/python-eventlet/doc/real_index.html b/python-eventlet/doc/real_index.html
new file mode 100644 (file)
index 0000000..d9a7d60
--- /dev/null
@@ -0,0 +1,181 @@
+<!doctype html>
+
+<html>
+  <head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+    <title>Eventlet Networking Library</title>
+    <link rel="stylesheet" href="doc/_static/default.css" type="text/css" />
+    <link rel="stylesheet" href="doc/_static/pygments.css" type="text/css" />
+    <link rel="stylesheet" href="https://yandex.st/highlightjs/7.3/styles/default.min.css">
+    <link rel="top" title="Eventlet Networking Library" href="" />
+  </head>
+  <body>
+  <script>
+    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+    (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+    m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+    })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+    ga('create', 'UA-42952223-1', 'eventlet.net');
+    ga('send', 'pageview');
+  </script>
+
+    <div class="document">
+      <div class="documentwrapper">
+        <div class="bodywrapper">
+          <div class="body">
+
+  <div class="section" id="eventlet">
+<h1>Eventlet</h1>
+
+<p>Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.</p>
+
+<ul>
+<li>It uses epoll or kqueue or libevent for <a class="reference external" target="_blank" href="http://en.wikipedia.org/wiki/Asynchronous_I/O#Select.28.2Fpoll.29_loops">highly scalable non-blocking I/O</a>.</li>
+<li><a class="reference external" target="_blank" href="http://en.wikipedia.org/wiki/Coroutine">Coroutines</a> ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.</li>
+<li>The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.</li>
+</ul>
+
+<p>It's easy to get started using Eventlet, and easy to convert existing applications to use it. Start off by looking at <a href="doc/examples.html">examples</a>, <a href="doc/design_patterns.html">common design patterns</a>, and the list of the <a href="doc/basic_usage.html">basic API primitives</a>.</p>
+
+<p>License: MIT.</p>
+
+<h3><a href="doc/">API Documentation</a></h3>
+
+
+<h3>Installation</h3>
+
+<p>To install eventlet, simply:
+<pre>
+pip install eventlet
+</pre></p>
+
+<p>Alternately, you can download the source tarball:</p>
+<ul>
+<li>latest release from <a class="reference external" target="_blank" href="https://pypi.python.org/pypi/eventlet/">PyPi</a>:
+  <a class="reference external" href="https://pypi.python.org/packages/source/e/eventlet/eventlet-0.17.4.tar.gz">eventlet-0.17.4.tar.gz</a></li>
+<li>or <a class="reference external" href="https://github.com/eventlet/eventlet/archive/master.zip">latest development version</a></li>
+</ul>
+
+
+<h3>Discussion</h3>
+
+<ul>
+<li>
+  <p><a class="reference external" target="_blank" href="https://lists.secondlife.com/cgi-bin/mailman/listinfo/eventletdev">eventletdev at lists.secondlife.com</a></p>
+  <p>This is a low traffic list about using and developing Eventlet. Look through the <a class="reference external" target="_blank" href="https://lists.secondlife.com/pipermail/eventletdev/">archives</a> for some useful information and possible answers to questions.</p>
+</li>
+<li>There's an IRC channel dedicated to Eventlet: <a class="reference external" target="_blank" href="irc://kubrick.freenode.net/#eventlet">#eventlet on freenode</a>.  It's a pretty chill place to hang out!</li>
+<li>We have an <a class="reference external" target="_blank" href="https://plus.google.com/communities/102444398246193806164">Eventlet Google+ Community</a>. Join us, +1, share your ideas, report bugs, find new friends, or even a new job!</li>
+</ul>
+
+
+<h3>Development</h3>
+
+<ul>
+<li><a class="reference" target="_blank" href="https://github.com/eventlet/eventlet/">Eventlet on Github</a></li>
+<li><a class="reference external" target="_blank" href="https://bitbucket.org/eventlet/eventlet/">Mercurial on Bitbucket</a>, "eventually consistent" mirror.</li>
+</ul>
+<p>Both repositories are equal and kept in sync.
+  You can use whichever you fancy for downloading, forking, reporting issues and submitting pull requests.</p>
+<p>The Mercurial repository used to be the main one, but most contributions and discussion now happen on Github.</p>
+
+<h4>Pull request policy</h4>
+<ul>
+  <li>Test is required</li>
+  <li>One commit is strongly preferred, except for very big changes</li>
+  <li>Commit message should follow the following formula:
+<pre>
+subsystem: description of why the change is useful
+
+optional details or links to related issues or websites
+</pre>
+    The <em>why</em> part is very important: the diff already shows <em>what</em> you changed, but nobody knows <em>why</em>.
+  </li>
+  <li>Feel free to add yourself to the AUTHORS file, in the Thanks To or Contributors section.</li>
+</ul>
+<p>If you don't like these rules, raw patches are more than welcome!</p>
+
+
+<h4>Bugs</h4>
+
+<p>Please be sure to report bugs <a class="reference external" target="_blank" href="http://www.chiark.greenend.org.uk/~sgtatham/bugs.html">as effectively as possible</a>, to ensure that we understand and act on them quickly.</p>
+
+<p>You may report bugs via:</p>
+<ol>
+  <li><a class="reference external" target="_blank" href="https://github.com/eventlet/eventlet/issues/new">Github</a></li>
+  <li><a class="reference external" target="_blank" href="https://bitbucket.org/eventlet/eventlet/issues/new/">Bitbucket</a> (no registration is required)</li>
+  <li><a class="reference external" target="_blank" href="mailto:eventletdev@lists.secondlife.com">Email eventletdev@lists.secondlife.com</a></li>
+</ol>
+
+
+<div class="section" id="web-crawler-example">
+<h2>Web Crawler Example<a class="headerlink" href="#web-crawler-example" title="Permalink to this headline">¶</a></h2>
+<p>This is a simple web &#8220;crawler&#8221; that fetches a bunch of URLs using a coroutine pool.  It has as much concurrency (i.e. pages being fetched simultaneously) as there are coroutines in the pool.</p>
+
+<pre><code class="language-python">import eventlet
+from eventlet.green import urllib2
+
+
+urls = [
+    "http://www.google.com/intl/en_ALL/images/logo.gif",
+    "https://wiki.secondlife.com/w/images/secondlife.jpg",
+    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
+]
+
+
+def fetch(url):
+    return urllib2.urlopen(url).read()
+
+
+pool = eventlet.GreenPool()
+
+for body in pool.imap(fetch, urls):
+    print("got body", len(body))
+</code></pre>
+
+
+<h3>Stats</h3>
+<p><a class="reference external" target="_blank" href="https://travis-ci.org/eventlet/eventlet"><img alt="Travis build" src="https://travis-ci.org/eventlet/eventlet.svg?branch=master"></a></p>
+
+<!--
+Here we insert Ohloh Project Basic Stats widget.
+<script src="http://www.ohloh.net/p/480234/widgets/project_basic_stats.js"></script>
+
+Unfortunately, they use blocking javascript with document.write() which is a bit unacceptable.
+So instead I inserted the result of javascript write. It's not public API, so it may break in future.
+In case iframe is broken, try visiting script again and copy updated html from there.
+-->
+<iframe src="http://www.ohloh.net/p/480234/widgets/project_basic_stats.html" scrolling="no" marginHeight=0 marginWidth=0 style="height: 225px; width: 350px; border: none;"></iframe>
+</div>
+</div>
+</div>
+
+
+<div class="section" id="contents">
+</div>
+</div>
+</div>
+</div>
+<div class="sphinxsidebar">
+<div class="sphinxsidebarwrapper">
+<h3>Links</h3>
+<ul>
+<li><a class="reference external" href="doc/">Documentation</a></li>
+<li><a class="reference external" href="doc/changelog.html">Changelog</a></li>
+<li><a class="reference external" target="_blank" href="https://plus.google.com/communities/102444398246193806164">Google+ community</a></li>
+<li><a class="reference external" target="_blank" href="https://github.com/eventlet/eventlet/">Eventlet on Github</a></li>
+<li><a class="reference external" target="_blank" href="https://bitbucket.org/eventlet/eventlet/">Eventlet on Bitbucket</a></li>
+<li><a class="reference external" target="_blank" href="https://lists.secondlife.com/pipermail/eventletdev/">Mailing List Archives</a></li>
+<li><a class="reference external" target="_blank" href="http://build.eventlet.net/">Automated Builds</a></li>
+<li><a class="reference external" target="_blank" href="irc://chat.freenode.net/#eventlet">IRC channel</a></li>
+<li><a class="reference external" target="_blank" href="http://blog.eventlet.net/">Blog (archive)</a></li>
+</ul>
+        </div>
+      </div>
+    </div>
+    <script src="//yandex.st/highlightjs/7.3/highlight.min.js"></script>
+    <script>hljs.initHighlightingOnLoad();</script>
+  </body>
+</html>
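
The landing page above sums up eventlet's model: green threads scheduled by a non-blocking I/O hub behind a blocking-style API. As a minimal sketch of the primitives it refers to (eventlet.spawn, eventlet.sleep and GreenThread.wait are the public API; the numbers are arbitrary):

    import eventlet

    def worker(n):
        eventlet.sleep(0.1)     # yields to the hub instead of blocking the whole process
        return n * n

    threads = [eventlet.spawn(worker, i) for i in range(3)]
    print([t.wait() for t in threads])   # [0, 1, 4]
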
similarity index 98%
rename from eventlet/eventlet/__init__.py
rename to python-eventlet/eventlet/__init__.py
index 7b7b3bf4392317d8d28d94b42ed1359f03785f4c..1bdcf52829a44fa0e8cbddea82c97cb2c7bce718 100644 (file)
@@ -1,4 +1,4 @@
-version_info = (0, 16, 1)
+version_info = (0, 17, 4)
 __version__ = '.'.join(map(str, version_info))
 
 try:
similarity index 99%
rename from eventlet/eventlet/event.py
rename to python-eventlet/eventlet/event.py
index f06e0069269d96f640ae12a7a86402f0887c6a69..22d0aa1eb01ba0f4a9beb8bae0103c6dfb7d3e5f 100644 (file)
@@ -109,7 +109,7 @@ class Event(object):
         waited for result
 
         Returns immediately if the event has already
-        occured.
+        occurred.
 
         >>> evt.wait()
         'result'
diff --git a/python-eventlet/eventlet/green/_socket_nodns.py b/python-eventlet/eventlet/green/_socket_nodns.py
new file mode 100644 (file)
index 0000000..8bfecfd
--- /dev/null
@@ -0,0 +1,31 @@
+__socket = __import__('socket')
+
+__all__ = __socket.__all__
+__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket']
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__socket, globals(),
+                 ignore=__patched__, srckeys=dir(__socket))
+
+os = __import__('os')
+import sys
+from eventlet.hubs import get_hub
+from eventlet.greenio import GreenSocket as socket
+from eventlet.greenio import _GLOBAL_DEFAULT_TIMEOUT
+
+try:
+    __original_fromfd__ = __socket.fromfd
+
+    def fromfd(*args):
+        return socket(__original_fromfd__(*args))
+except AttributeError:
+    pass
+
+try:
+    __original_socketpair__ = __socket.socketpair
+
+    def socketpair(*args):
+        one, two = __original_socketpair__(*args)
+        return socket(one), socket(two)
+except AttributeError:
+    pass
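
The try/except wrappers above re-wrap the objects returned by the original fromfd()/socketpair() in GreenSocket, so both ends cooperate with the hub. A quick sketch through the public green socket module (socketpair() assumed available, i.e. POSIX):

    from eventlet.green import socket

    a, b = socket.socketpair()   # both ends are GreenSocket instances
    a.sendall(b'ping')
    print(b.recv(4))             # b'ping'; recv() trampolines instead of blocking
    a.close()
    b.close()
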
diff --git a/python-eventlet/eventlet/green/http/__init__.py b/python-eventlet/eventlet/green/http/__init__.py
new file mode 100644 (file)
index 0000000..c9e2a23
--- /dev/null
@@ -0,0 +1,2 @@
+from eventlet.support import six
+assert six.PY3, 'This is a Python 3 module'
diff --git a/python-eventlet/eventlet/green/http/client.py b/python-eventlet/eventlet/green/http/client.py
new file mode 100644 (file)
index 0000000..480a252
--- /dev/null
@@ -0,0 +1,9 @@
+from eventlet import patcher
+from eventlet.green import os, socket
+from eventlet.green.urllib import parse as urllib_parse
+
+patcher.inject('http.client', globals(),
+               ('os', os), ('socket', socket), ('urllib.parse', urllib_parse))
+
+del patcher
+del urllib_parse
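
The new green http.* and urllib.* modules all rely on the same mechanism: patcher.inject() re-executes a stdlib module with the named imports replaced by their green counterparts. The public equivalent is patcher.import_patched(); a sketch, with ftplib chosen purely as an illustrative target module:

    from eventlet import patcher
    from eventlet.green import socket, time

    # Re-import ftplib with cooperative socket and time modules substituted.
    green_ftplib = patcher.import_patched('ftplib', socket=socket, time=time)
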
diff --git a/python-eventlet/eventlet/green/http/cookiejar.py b/python-eventlet/eventlet/green/http/cookiejar.py
new file mode 100644 (file)
index 0000000..5e511d2
--- /dev/null
@@ -0,0 +1,13 @@
+from eventlet.green import threading, time
+from eventlet.green.http import client
+from eventlet.green.urllib import parse as urllib_parse, request as urllib_request
+from eventlet import patcher
+
+patcher.inject('http.cookiejar', globals(),
+               ('http.client', client), ('threading', threading),
+               ('urllib.parse', urllib_parse), ('urllib.request', urllib_request),
+               ('time', time))
+
+del urllib_request
+del urllib_parse
+del patcher
diff --git a/python-eventlet/eventlet/green/http/cookies.py b/python-eventlet/eventlet/green/http/cookies.py
new file mode 100644 (file)
index 0000000..e139069
--- /dev/null
@@ -0,0 +1,7 @@
+from eventlet import patcher
+from eventlet.green import time
+
+patcher.inject('http.cookies', globals())
+_getdate = patcher.patch_function(_getdate, ('time', time))
+
+del patcher
diff --git a/python-eventlet/eventlet/green/http/server.py b/python-eventlet/eventlet/green/http/server.py
new file mode 100644 (file)
index 0000000..35c3ab2
--- /dev/null
@@ -0,0 +1,17 @@
+from eventlet import patcher
+from eventlet.green import os, time, select, socket, SocketServer, subprocess
+from eventlet.green.http import client
+from eventlet.green.urllib import parse as urllib_parse
+
+patcher.inject('http.server', globals(),
+               ('http.client', client), ('os', os), ('select', select),
+               ('socket', socket), ('socketserver', SocketServer), ('time', time),
+               ('urllib.parse', urllib_parse))
+
+
+CGIHTTPRequestHandler.run_cgi = patcher.patch_function(
+    CGIHTTPRequestHandler.run_cgi, ('subprocess', subprocess))
+
+del urllib_parse
+del client
+del patcher
diff --git a/python-eventlet/eventlet/green/selectors.py b/python-eventlet/eventlet/green/selectors.py
new file mode 100644 (file)
index 0000000..26427ec
--- /dev/null
@@ -0,0 +1,11 @@
+import sys
+
+from eventlet import patcher
+from eventlet.green import select
+
+patcher.inject('selectors', globals(), ('select', select))
+
+del patcher
+
+if sys.platform != 'win32':
+    SelectSelector._select = staticmethod(select.select)
similarity index 92%
rename from eventlet/eventlet/green/socket.py
rename to python-eventlet/eventlet/green/socket.py
index 2ec9d1b244fe57c5aeb1fe3b5dedd2d18e0f5103..e8ef03251e7f9a5c3a4e766bf21681fcb4367bf3 100644 (file)
@@ -1,6 +1,6 @@
 import os
 import sys
-from eventlet.hubs import get_hub
+
 __import__('eventlet.green._socket_nodns')
 __socket = sys.modules['eventlet.green._socket_nodns']
 
@@ -39,7 +39,7 @@ def create_connection(address,
     is used.
     """
 
-    msg = "getaddrinfo returns an empty list"
+    err = "getaddrinfo returns an empty list"
     host, port = address
     for res in getaddrinfo(host, port, 0, SOCK_STREAM):
         af, socktype, proto, canonname, sa = res
@@ -54,8 +54,10 @@ def create_connection(address,
             return sock
 
         except error as e:
-            msg = e
+            err = e
             if sock is not None:
                 sock.close()
 
-    raise error(msg)
+    if not isinstance(err, error):
+        err = error(err)
+    raise err
similarity index 95%
rename from eventlet/eventlet/green/ssl.py
rename to python-eventlet/eventlet/green/ssl.py
index 963fbdb265af85483f7ff2bdcebc9a4bd98da866..ded65338c8d59bad2c7d26b7be9f21b3c81d979d 100644 (file)
@@ -3,6 +3,7 @@ __ssl = __import__('ssl')
 from eventlet.patcher import slurp_properties
 slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
 
+import functools
 import sys
 import errno
 time = __import__('time')
@@ -162,11 +163,14 @@ class GreenSSLSocket(_original_sslsocket):
                     self.__class__)
             amount = len(data)
             count = 0
+            data_to_send = data
             while (count < amount):
-                v = self.send(data[count:])
+                v = self.send(data_to_send)
                 count += v
                 if v == 0:
                     trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+                else:
+                    data_to_send = data[count:]
             return amount
         else:
             while True:
@@ -342,7 +346,7 @@ def wrap_socket(sock, *a, **kw):
 if hasattr(__ssl, 'sslwrap_simple'):
     def sslwrap_simple(sock, keyfile=None, certfile=None):
         """A replacement for the old socket.ssl function.  Designed
-        for compability with Python 2.5 and earlier.  Will disappear in
+        for compatibility with Python 2.5 and earlier.  Will disappear in
         Python 3.0."""
         ssl_sock = GreenSSLSocket(sock, keyfile=keyfile, certfile=certfile,
                                   server_side=False,
@@ -350,3 +354,14 @@ if hasattr(__ssl, 'sslwrap_simple'):
                                   ssl_version=PROTOCOL_SSLv23,
                                   ca_certs=None)
         return ssl_sock
+
+
+if hasattr(__ssl, 'SSLContext'):
+    @functools.wraps(__ssl.SSLContext.wrap_socket)
+    def _green_sslcontext_wrap_socket(self, sock, *a, **kw):
+        return GreenSSLSocket(sock, *a, _context=self, **kw)
+
+    # FIXME:
+    # * GreenSSLContext akin to GreenSSLSocket
+    # * make ssl.create_default_context() use modified SSLContext from globals as usual
+    __ssl.SSLContext.wrap_socket = _green_sslcontext_wrap_socket
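
Two behavioural notes on the hunks above: sendall() now keeps handing the same buffer back to send() until some bytes go out (re-slicing only after progress), and SSLContext.wrap_socket() is redirected to GreenSSLSocket. A sketch of the latter, assuming a Python build whose ssl module provides SSLContext; the host and port are illustrative only:

    from eventlet.green import socket, ssl

    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)   # constants are re-exported by the green module
    raw = socket.socket()
    raw.connect(('example.com', 443))
    conn = ctx.wrap_socket(raw)                 # a GreenSSLSocket; the handshake cooperates with the hub
    conn.close()
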
similarity index 93%
rename from eventlet/eventlet/green/subprocess.py
rename to python-eventlet/eventlet/green/subprocess.py
index 1d7c49a3b2ebcb6d24df2c66d0fab69e909a650f..7ce38cfb11d1790faf1110992df8218a1f9cc8ce 100644 (file)
@@ -1,15 +1,21 @@
 import errno
-import time
+import sys
 from types import FunctionType
 
 import eventlet
 from eventlet import greenio
 from eventlet import patcher
-from eventlet.green import select
+from eventlet.green import select, threading, time
 from eventlet.support import six
 
 
-patcher.inject('subprocess', globals(), ('select', select))
+to_patch = [('select', select), ('threading', threading), ('time', time)]
+
+if sys.version_info > (3, 4):
+    from eventlet.green import selectors
+    to_patch.append(('selectors', selectors))
+
+patcher.inject('subprocess', globals(), *to_patch)
 subprocess_orig = __import__("subprocess")
 
 
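
With threading, time and (on Python 3.4+) selectors injected as green modules, waiting on a child process no longer stalls the hub. A small sketch, assuming a POSIX `echo` on PATH:

    import eventlet
    from eventlet.green import subprocess

    ticker = eventlet.spawn(lambda: [eventlet.sleep(0.01) for _ in range(10)])
    out = subprocess.check_output(['echo', 'hello'])   # waits on the child cooperatively
    print(out)        # b'hello\n' on Python 3, 'hello\n' on Python 2
    ticker.wait()     # the ticker greenthread kept running in the meantime
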
similarity index 68%
rename from eventlet/eventlet/green/thread.py
rename to python-eventlet/eventlet/green/thread.py
index 236031089521045f7c298c2b79ecba115f4639a7..5c7446fa2ba91926d5d07bb150a5bb59a1c34099 100644 (file)
@@ -3,6 +3,7 @@ from eventlet.support.six.moves import _thread as __thread
 from eventlet.support import greenlets as greenlet, six
 from eventlet import greenthread
 from eventlet.semaphore import Semaphore as LockType
+import sys
 
 
 __patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
@@ -43,6 +44,29 @@ def __thread_body(func, args, kwargs):
 
 
 def start_new_thread(function, args=(), kwargs=None):
+    if (sys.version_info >= (3, 4)
+            and getattr(function, '__module__', '') == 'threading'
+            and hasattr(function, '__self__')):
+        # Since Python 3.4, threading.Thread uses an internal lock
+        # automatically released when the python thread state is deleted.
+        # With monkey patching, eventlet uses green threads without python
+        # thread state, so the lock is not automatically released.
+        #
+        # Wrap _bootstrap_inner() to release explicitly the thread state lock
+        # when the thread completes.
+        thread = function.__self__
+        bootstrap_inner = thread._bootstrap_inner
+
+        def wrap_bootstrap_inner():
+            try:
+                bootstrap_inner()
+            finally:
+                # The lock can be cleared (ex: by a fork())
+                if thread._tstate_lock is not None:
+                    thread._tstate_lock.release()
+
+        thread._bootstrap_inner = wrap_bootstrap_inner
+
     kwargs = kwargs or {}
     g = greenthread.spawn_n(__thread_body, function, args, kwargs)
     return get_ident(g)
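
The wrapper above addresses Thread.join() under monkey patching on Python 3.4+, where join() blocks on the thread's _tstate_lock; since green threads have no real thread state to release it, the wrapped _bootstrap_inner() releases it explicitly. Roughly the scenario it covers:

    import eventlet
    eventlet.monkey_patch()      # threading.Thread now runs on green threads

    import threading

    t = threading.Thread(target=lambda: None)
    t.start()
    t.join()                     # returns instead of hanging, thanks to the released lock
    print(t.is_alive())          # False
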
diff --git a/python-eventlet/eventlet/green/urllib/__init__.py b/python-eventlet/eventlet/green/urllib/__init__.py
new file mode 100644 (file)
index 0000000..7cb4ea6
--- /dev/null
@@ -0,0 +1,40 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.green import time
+from eventlet.green import httplib
+from eventlet.green import ftplib
+from eventlet.support import six
+
+if six.PY2:
+    to_patch = [('socket', socket), ('httplib', httplib),
+                ('time', time), ('ftplib', ftplib)]
+    try:
+        from eventlet.green import ssl
+        to_patch.append(('ssl', ssl))
+    except ImportError:
+        pass
+
+    patcher.inject('urllib', globals(), *to_patch)
+    try:
+        URLopener
+    except NameError:
+        patcher.inject('urllib.request', globals(), *to_patch)
+
+
+    # patch a bunch of things that have imports inside the
+    # function body; this is lame and hacky but I don't feel
+    # too bad because urllib is a hacky pile of junk that no
+    # one should be using anyhow
+    URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib))
+    if hasattr(URLopener, 'open_https'):
+        URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib))
+
+    URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib))
+    ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib))
+    ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib))
+
+    del patcher
+
+    # Run test program when run as a script
+    if __name__ == '__main__':
+        main()
diff --git a/python-eventlet/eventlet/green/urllib/error.py b/python-eventlet/eventlet/green/urllib/error.py
new file mode 100644 (file)
index 0000000..6913813
--- /dev/null
@@ -0,0 +1,4 @@
+from eventlet import patcher
+from eventlet.green.urllib import response
+patcher.inject('urllib.error', globals(), ('urllib.response', response))
+del patcher
diff --git a/python-eventlet/eventlet/green/urllib/parse.py b/python-eventlet/eventlet/green/urllib/parse.py
new file mode 100644 (file)
index 0000000..f3a8924
--- /dev/null
@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.parse', globals())
+del patcher
diff --git a/python-eventlet/eventlet/green/urllib/request.py b/python-eventlet/eventlet/green/urllib/request.py
new file mode 100644 (file)
index 0000000..8160bb9
--- /dev/null
@@ -0,0 +1,44 @@
+from eventlet import patcher
+from eventlet.green import ftplib, os, socket, time
+from eventlet.green.http import client as http_client
+from eventlet.green.urllib import error, parse, response
+
+# TODO should we also have green email version?
+# import email
+
+
+to_patch = [
+    ('http.client', http_client),
+    ('os', os),
+    ('socket', socket),
+    ('time', time),
+    ('urllib.error', error),
+    ('urllib.parse', parse),
+    ('urllib.response', response),
+]
+
+try:
+    from eventlet.green import ssl
+except ImportError:
+    pass
+else:
+    to_patch.append(('ssl', ssl))
+
+patcher.inject('urllib.request', globals(), *to_patch)
+del to_patch
+
+to_patch_in_functions = [('ftplib', ftplib)]
+del ftplib
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
+URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)
+
+ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)
+
+ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
+ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)
+
+del error
+del parse
+del response
+del to_patch_in_functions
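
On Python 3 this green urllib.request plays the role urllib2 has in the crawler on the landing page; a sketch with illustrative URLs:

    import eventlet
    from eventlet.green.urllib import request

    urls = ['http://example.com/', 'http://example.org/']   # illustrative only
    pool = eventlet.GreenPool()
    for body in pool.imap(lambda u: request.urlopen(u).read(), urls):
        print(len(body))
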
diff --git a/python-eventlet/eventlet/green/urllib/response.py b/python-eventlet/eventlet/green/urllib/response.py
new file mode 100644 (file)
index 0000000..f9aaba5
--- /dev/null
@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.response', globals())
+del patcher
similarity index 99%
rename from eventlet/eventlet/green/zmq.py
rename to python-eventlet/eventlet/green/zmq.py
index b08eabdc8431117a90fa9294950582f1a317dcd5..c1097fa758260093818a2c36a23400c6123bb284 100644 (file)
@@ -204,7 +204,7 @@ class Socket(_Socket):
         * send
         * recv
         * getsockopt
-    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or recieving
+    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
     is deferred to the hub (using :func:`eventlet.hubs.trampoline`) if a
     ``zmq.EAGAIN`` (retry) error is raised
 
diff --git a/python-eventlet/eventlet/greenio/__init__.py b/python-eventlet/eventlet/greenio/__init__.py
new file mode 100644 (file)
index 0000000..72cd33e
--- /dev/null
@@ -0,0 +1,8 @@
+from eventlet.support import six
+
+from eventlet.greenio.base import *  # noqa
+
+if six.PY2:
+    from eventlet.greenio.py2 import *  # noqa
+else:
+    from eventlet.greenio.py3 import *  # noqa
similarity index 71%
rename from eventlet/eventlet/greenio.py
rename to python-eventlet/eventlet/greenio/base.py
index f44096e91a56b9c93447a30a1db00713a8d4d141..8da51caa88f155062297e0c7290cd193dd27df9f 100644 (file)
@@ -7,9 +7,13 @@ import time
 import warnings
 
 from eventlet.support import get_errno, six
 import warnings
 
 from eventlet.support import get_errno, six
-from eventlet.hubs import trampoline, notify_close, notify_opened, IOClosed
+from eventlet.hubs import trampoline, notify_opened, IOClosed
 
 
-__all__ = ['GreenSocket', 'GreenPipe', 'shutdown_safe']
+__all__ = [
+    'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
+    'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
+    'shutdown_safe', 'SSL',
+]
 
 BUFFER_SIZE = 4096
 CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
 
 BUFFER_SIZE = 4096
 CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
@@ -17,11 +21,8 @@ CONNECT_SUCCESS = set((0, errno.EISCONN))
 if sys.platform[:3] == "win":
     CONNECT_ERR.add(errno.WSAEINVAL)   # Bug 67
 
 if sys.platform[:3] == "win":
     CONNECT_ERR.add(errno.WSAEINVAL)   # Bug 67
 
-if six.PY3:
-    from io import IOBase as file
-    _fileobject = socket.SocketIO
-elif six.PY2:
-    _fileobject = socket._fileobject
+if six.PY2:
+    _python2_fileobject = socket._fileobject
 
 
 def socket_connect(descriptor, address):
 
 
 def socket_connect(descriptor, address):
@@ -293,7 +294,7 @@ class GreenSocket(object):
     else:
         def makefile(self, *args, **kwargs):
             dupped = self.dup()
     else:
         def makefile(self, *args, **kwargs):
             dupped = self.dup()
-            res = _fileobject(dupped, *args, **kwargs)
+            res = _python2_fileobject(dupped, *args, **kwargs)
             if hasattr(dupped, "_drop"):
                 dupped._drop()
             return res
             if hasattr(dupped, "_drop"):
                 dupped._drop()
             return res
@@ -418,119 +419,11 @@ class GreenSocket(object):
             getattr(self.fd, '_sock', self.fd)._drop()
 
 
             getattr(self.fd, '_sock', self.fd)._drop()
 
 
-class _SocketDuckForFd(object):
-    """Class implementing all socket method used by _fileobject
-    in cooperative manner using low level os I/O calls.
-    """
-    _refcount = 0
-
-    def __init__(self, fileno):
-        self._fileno = fileno
-        notify_opened(fileno)
-        self._closed = False
-
-    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
-        if self._closed:
-            # Don't trampoline if we're already closed.
-            raise IOClosed()
-        try:
-            return trampoline(fd, read=read, write=write, timeout=timeout,
-                              timeout_exc=timeout_exc,
-                              mark_as_closed=self._mark_as_closed)
-        except IOClosed:
-            # Our fileno has been obsoleted. Defang ourselves to
-            # prevent spurious closes.
-            self._mark_as_closed()
-            raise
-
-    def _mark_as_closed(self):
-        self._closed = True
-
-    @property
-    def _sock(self):
-        return self
-
-    def fileno(self):
-        return self._fileno
-
-    def recv(self, buflen):
-        while True:
-            try:
-                data = os.read(self._fileno, buflen)
-                return data
-            except OSError as e:
-                if get_errno(e) not in SOCKET_BLOCKING:
-                    raise IOError(*e.args)
-            self._trampoline(self, read=True)
-
-    def recv_into(self, buf, nbytes=0, flags=0):
-        if nbytes == 0:
-            nbytes = len(buf)
-        data = self.recv(nbytes)
-        buf[:nbytes] = data
-        return len(data)
-
-    def send(self, data):
-        while True:
-            try:
-                return os.write(self._fileno, data)
-            except OSError as e:
-                if get_errno(e) not in SOCKET_BLOCKING:
-                    raise IOError(*e.args)
-                else:
-                    trampoline(self, write=True)
-
-    def sendall(self, data):
-        len_data = len(data)
-        os_write = os.write
-        fileno = self._fileno
-        try:
-            total_sent = os_write(fileno, data)
-        except OSError as e:
-            if get_errno(e) != errno.EAGAIN:
-                raise IOError(*e.args)
-            total_sent = 0
-        while total_sent < len_data:
-            self._trampoline(self, write=True)
-            try:
-                total_sent += os_write(fileno, data[total_sent:])
-            except OSError as e:
-                if get_errno(e) != errno. EAGAIN:
-                    raise IOError(*e.args)
-
-    def __del__(self):
-        self._close()
-
-    def _close(self):
-        notify_close(self._fileno)
-        self._mark_as_closed()
-        try:
-            os.close(self._fileno)
-        except:
-            # os.close may fail if __init__ didn't complete
-            # (i.e file dscriptor passed to popen was invalid
-            pass
-
-    def __repr__(self):
-        return "%s:%d" % (self.__class__.__name__, self._fileno)
-
-    def _reuse(self):
-        self._refcount += 1
-
-    def _drop(self):
-        self._refcount -= 1
-        if self._refcount == 0:
-            self._close()
-    # Python3
-    _decref_socketios = _drop
-
-
-def _operationOnClosedFile(*args, **kwargs):
+def _operation_on_closed_file(*args, **kwargs):
     raise ValueError("I/O operation on closed file")
 
 
-class GreenPipe(_fileobject):
-    """
+greenpipe_doc = """
     GreenPipe is a cooperative replacement for file class.
     It will cooperate on pipes. It will block on regular file.
     Differneces from file class:
@@ -542,103 +435,6 @@ class GreenPipe(_fileobject):
     - file argument can be descriptor, file name or file object.
     """
 
-    def __init__(self, f, mode='r', bufsize=-1):
-        if not isinstance(f, six.string_types + (int, file)):
-            raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f)
-
-        if isinstance(f, six.string_types):
-            f = open(f, mode, 0)
-
-        if isinstance(f, int):
-            fileno = f
-            self._name = "<fd:%d>" % fileno
-        else:
-            fileno = os.dup(f.fileno())
-            self._name = f.name
-            if f.mode != mode:
-                raise ValueError('file.mode %r does not match mode parameter %r' % (f.mode, mode))
-            self._name = f.name
-            f.close()
-
-        super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode)
-        set_nonblocking(self)
-        self.softspace = 0
-
-    @property
-    def name(self):
-        return self._name
-
-    def __repr__(self):
-        return "<%s %s %r, mode %r at 0x%x>" % (
-            self.closed and 'closed' or 'open',
-            self.__class__.__name__,
-            self.name,
-            self.mode,
-            (id(self) < 0) and (sys.maxint + id(self)) or id(self))
-
-    def close(self):
-        super(GreenPipe, self).close()
-        for method in [
-                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
-                'readline', 'readlines', 'seek', 'tell', 'truncate',
-                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
-            setattr(self, method, _operationOnClosedFile)
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *args):
-        self.close()
-
-    def _get_readahead_len(self):
-        return len(self._rbuf.getvalue())
-
-    def _clear_readahead_buf(self):
-        len = self._get_readahead_len()
-        if len > 0:
-            self.read(len)
-
-    def tell(self):
-        self.flush()
-        try:
-            return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len()
-        except OSError as e:
-            raise IOError(*e.args)
-
-    def seek(self, offset, whence=0):
-        self.flush()
-        if whence == 1 and offset == 0:  # tell synonym
-            return self.tell()
-        if whence == 1:  # adjust offset by what is read ahead
-            offset -= self._get_readahead_len()
-        try:
-            rv = os.lseek(self.fileno(), offset, whence)
-        except OSError as e:
-            raise IOError(*e.args)
-        else:
-            self._clear_readahead_buf()
-            return rv
-
-    if getattr(file, "truncate", None):  # not all OSes implement truncate
-        def truncate(self, size=-1):
-            self.flush()
-            if size == -1:
-                size = self.tell()
-            try:
-                rv = os.ftruncate(self.fileno(), size)
-            except OSError as e:
-                raise IOError(*e.args)
-            else:
-                self.seek(size)  # move position&clear buffer
-                return rv
-
-    def isatty(self):
-        try:
-            return os.isatty(self.fileno())
-        except OSError as e:
-            raise IOError(*e.args)
-
-
 # import SSL module here so we can refer to greenio.SSL.exceptionclass
 try:
     from OpenSSL import SSL
 # import SSL module here so we can refer to greenio.SSL.exceptionclass
 try:
     from OpenSSL import SSL
@@ -677,5 +473,5 @@ def shutdown_safe(sock):
     except socket.error as e:
         # we don't care if the socket is already closed;
         # this will often be the case in an http server context
     except socket.error as e:
         # we don't care if the socket is already closed;
         # this will often be the case in an http server context
-        if get_errno(e) != errno.ENOTCONN:
+        if get_errno(e) not in (errno.ENOTCONN, errno.EBADF):
             raise
             raise
diff --git a/python-eventlet/eventlet/greenio/py2.py b/python-eventlet/eventlet/greenio/py2.py
new file mode 100644 (file)
index 0000000..a0e9efe
--- /dev/null
@@ -0,0 +1,226 @@
+import errno
+import os
+
+from eventlet.greenio.base import (
+    _operation_on_closed_file,
+    greenpipe_doc,
+    set_nonblocking,
+    socket,
+    SOCKET_BLOCKING,
+)
+from eventlet.hubs import trampoline, notify_close, notify_opened, IOClosed
+from eventlet.support import get_errno, six
+
+__all__ = ['_fileobject', 'GreenPipe']
+
+_fileobject = socket._fileobject
+
+
+class GreenPipe(_fileobject):
+
+    __doc__ = greenpipe_doc
+
+    def __init__(self, f, mode='r', bufsize=-1):
+        if not isinstance(f, six.string_types + (int, file)):
+            raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f)
+
+        if isinstance(f, six.string_types):
+            f = open(f, mode, 0)
+
+        if isinstance(f, int):
+            fileno = f
+            self._name = "<fd:%d>" % fileno
+        else:
+            fileno = os.dup(f.fileno())
+            self._name = f.name
+            if f.mode != mode:
+                raise ValueError('file.mode %r does not match mode parameter %r' % (f.mode, mode))
+            self._name = f.name
+            f.close()
+
+        super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode)
+        set_nonblocking(self)
+        self.softspace = 0
+
+    @property
+    def name(self):
+        return self._name
+
+    def __repr__(self):
+        return "<%s %s %r, mode %r at 0x%x>" % (
+            self.closed and 'closed' or 'open',
+            self.__class__.__name__,
+            self.name,
+            self.mode,
+            (id(self) < 0) and (sys.maxint + id(self)) or id(self))
+
+    def close(self):
+        super(GreenPipe, self).close()
+        for method in [
+                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
+                'readline', 'readlines', 'seek', 'tell', 'truncate',
+                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
+            setattr(self, method, _operation_on_closed_file)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def _get_readahead_len(self):
+        return len(self._rbuf.getvalue())
+
+    def _clear_readahead_buf(self):
+        len = self._get_readahead_len()
+        if len > 0:
+            self.read(len)
+
+    def tell(self):
+        self.flush()
+        try:
+            return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len()
+        except OSError as e:
+            raise IOError(*e.args)
+
+    def seek(self, offset, whence=0):
+        self.flush()
+        if whence == 1 and offset == 0:  # tell synonym
+            return self.tell()
+        if whence == 1:  # adjust offset by what is read ahead
+            offset -= self._get_readahead_len()
+        try:
+            rv = os.lseek(self.fileno(), offset, whence)
+        except OSError as e:
+            raise IOError(*e.args)
+        else:
+            self._clear_readahead_buf()
+            return rv
+
+    if getattr(file, "truncate", None):  # not all OSes implement truncate
+        def truncate(self, size=-1):
+            self.flush()
+            if size == -1:
+                size = self.tell()
+            try:
+                rv = os.ftruncate(self.fileno(), size)
+            except OSError as e:
+                raise IOError(*e.args)
+            else:
+                self.seek(size)  # move position&clear buffer
+                return rv
+
+    def isatty(self):
+        try:
+            return os.isatty(self.fileno())
+        except OSError as e:
+            raise IOError(*e.args)
+
+
+class _SocketDuckForFd(object):
+    """Class implementing all socket method used by _fileobject
+    in cooperative manner using low level os I/O calls.
+    """
+    _refcount = 0
+
+    def __init__(self, fileno):
+        self._fileno = fileno
+        notify_opened(fileno)
+        self._closed = False
+
+    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+        if self._closed:
+            # Don't trampoline if we're already closed.
+            raise IOClosed()
+        try:
+            return trampoline(fd, read=read, write=write, timeout=timeout,
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
+        except IOClosed:
+            # Our fileno has been obsoleted. Defang ourselves to
+            # prevent spurious closes.
+            self._mark_as_closed()
+            raise
+
+    def _mark_as_closed(self):
+        current = self._closed
+        self._closed = True
+        return current
+
+    @property
+    def _sock(self):
+        return self
+
+    def fileno(self):
+        return self._fileno
+
+    def recv(self, buflen):
+        while True:
+            try:
+                data = os.read(self._fileno, buflen)
+                return data
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+            self._trampoline(self, read=True)
+
+    def recv_into(self, buf, nbytes=0, flags=0):
+        if nbytes == 0:
+            nbytes = len(buf)
+        data = self.recv(nbytes)
+        buf[:nbytes] = data
+        return len(data)
+
+    def send(self, data):
+        while True:
+            try:
+                return os.write(self._fileno, data)
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+                else:
+                    trampoline(self, write=True)
+
+    def sendall(self, data):
+        len_data = len(data)
+        os_write = os.write
+        fileno = self._fileno
+        try:
+            total_sent = os_write(fileno, data)
+        except OSError as e:
+            if get_errno(e) != errno.EAGAIN:
+                raise IOError(*e.args)
+            total_sent = 0
+        while total_sent < len_data:
+            self._trampoline(self, write=True)
+            try:
+                total_sent += os_write(fileno, data[total_sent:])
+            except OSError as e:
+                if get_errno(e) != errno. EAGAIN:
+                    raise IOError(*e.args)
+
+    def __del__(self):
+        self._close()
+
+    def _close(self):
+        was_closed = self._mark_as_closed()
+        if was_closed:
+            return
+        notify_close(self._fileno)
+        try:
+            os.close(self._fileno)
+        except:
+            # os.close may fail if __init__ didn't complete
+            # (i.e file dscriptor passed to popen was invalid
+            pass
+
+    def __repr__(self):
+        return "%s:%d" % (self.__class__.__name__, self._fileno)
+
+    def _reuse(self):
+        self._refcount += 1
+
+    def _drop(self):
+        self._refcount -= 1
+        if self._refcount == 0:
+            self._close()
diff --git a/python-eventlet/eventlet/greenio/py3.py b/python-eventlet/eventlet/greenio/py3.py
new file mode 100644 (file)
index 0000000..338ac68
--- /dev/null
@@ -0,0 +1,192 @@
+import _pyio as _original_pyio
+import errno
+import os as _original_os
+import socket as _original_socket
+from io import (
+    BufferedRandom as _OriginalBufferedRandom,
+    BufferedReader as _OriginalBufferedReader,
+    BufferedWriter as _OriginalBufferedWriter,
+    DEFAULT_BUFFER_SIZE,
+    TextIOWrapper as _OriginalTextIOWrapper,
+    IOBase as _OriginalIOBase,
+)
+from types import FunctionType
+
+from eventlet.greenio.base import (
+    _operation_on_closed_file,
+    greenpipe_doc,
+    set_nonblocking,
+    SOCKET_BLOCKING,
+)
+from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
+from eventlet.support import get_errno, six
+
+__all__ = ['_fileobject', 'GreenPipe']
+
+# TODO get rid of this, it only seems like the original _fileobject
+_fileobject = _original_socket.SocketIO
+
+# Large part of the following code is copied from the original
+# eventlet.greenio module
+
+
+class GreenFileIO(_OriginalIOBase):
+    def __init__(self, name, mode='r', closefd=True, opener=None):
+        if isinstance(name, int):
+            fileno = name
+            self._name = "<fd:%d>" % fileno
+        else:
+            assert isinstance(name, six.string_types)
+            with open(name, mode) as fd:
+                self._name = fd.name
+                fileno = _original_os.dup(fd.fileno())
+
+        notify_opened(fileno)
+        self._fileno = fileno
+        self._mode = mode
+        self._closed = False
+        set_nonblocking(self)
+        self._seekable = None
+
+    @property
+    def closed(self):
+        return self._closed
+
+    def seekable(self):
+        if self._seekable is None:
+            try:
+                _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
+            except IOError as e:
+                if get_errno(e) == errno.ESPIPE:
+                    self._seekable = False
+                else:
+                    raise
+            else:
+                self._seekable = True
+
+        return self._seekable
+
+    def readable(self):
+        return 'r' in self._mode or '+' in self._mode
+
+    def writable(self):
+        return 'w' in self._mode or '+' in self._mode
+
+    def fileno(self):
+        return self._fileno
+
+    def read(self, buflen):
+        while True:
+            try:
+                return _original_os.read(self._fileno, buflen)
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+            self._trampoline(self, read=True)
+
+    def readinto(self, b):
+        up_to = len(b)
+        data = self.read(up_to)
+        bytes_read = len(data)
+        b[:bytes_read] = data
+        return bytes_read
+
+    def isatty(self):
+        try:
+            return _original_os.isatty(self.fileno())
+        except OSError as e:
+            raise IOError(*e.args)
+
+    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+        if self._closed:
+            # Don't trampoline if we're already closed.
+            raise IOClosed()
+        try:
+            return trampoline(fd, read=read, write=write, timeout=timeout,
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
+        except IOClosed:
+            # Our fileno has been obsoleted. Defang ourselves to
+            # prevent spurious closes.
+            self._mark_as_closed()
+            raise
+
+    def _mark_as_closed(self):
+        """ Mark this socket as being closed """
+        self._closed = True
+
+    def write(self, data):
+        while True:
+            try:
+                return _original_os.write(self._fileno, data)
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+                else:
+                    trampoline(self, write=True)
+
+    def close(self):
+        if not self._closed:
+            self._closed = True
+            _original_os.close(self._fileno)
+        notify_close(self._fileno)
+        for method in [
+                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
+                'readline', 'readlines', 'seek', 'tell', 'truncate',
+                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
+            setattr(self, method, _operation_on_closed_file)
+
+    def truncate(self, size=-1):
+        if size == -1:
+            size = self.tell()
+        try:
+            rv = _original_os.ftruncate(self._fileno, size)
+        except OSError as e:
+            raise IOError(*e.args)
+        else:
+            self.seek(size)  # move position&clear buffer
+            return rv
+
+    def seek(self, offset, whence=_original_os.SEEK_SET):
+        try:
+            return _original_os.lseek(self._fileno, offset, whence)
+        except OSError as e:
+            raise IOError(*e.args)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+
+_open_environment = dict(globals())
+_open_environment.update(dict(
+    BufferedRandom=_OriginalBufferedRandom,
+    BufferedWriter=_OriginalBufferedWriter,
+    BufferedReader=_OriginalBufferedReader,
+    TextIOWrapper=_OriginalTextIOWrapper,
+    FileIO=GreenFileIO,
+    os=_original_os,
+))
+
+_open = FunctionType(
+    six.get_function_code(_original_pyio.open),
+    _open_environment,
+)
+
+
+def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
+              newline=None, closefd=True, opener=None):
+    try:
+        fileno = name.fileno()
+    except AttributeError:
+        pass
+    else:
+        fileno = _original_os.dup(fileno)
+        name.close()
+        name = fileno
+
+    return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)
+
+GreenPipe.__doc__ = greenpipe_doc
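
On Python 3, GreenPipe is now a factory around _pyio.open() with FileIO swapped for GreenFileIO, so reads on a pipe yield to the hub instead of blocking. A sketch, assuming POSIX pipes:

    import os
    import eventlet
    from eventlet import greenio

    r, w = os.pipe()
    reader = greenio.GreenPipe(r, 'rb', 0)             # unbuffered -> a raw GreenFileIO
    eventlet.spawn_after(0.05, os.write, w, b'hello')  # the writer fires a little later
    print(reader.read(5))                              # main greenthread yields until data arrives
    reader.close()
    os.close(w)
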
similarity index 94%
rename from eventlet/eventlet/patcher.py
rename to python-eventlet/eventlet/patcher.py
index ea3189718129a8998828cf7902739388f95a8511..eb09f9ad6c44509c8fdc8c423d9030ffa92d10b4 100644 (file)
@@ -223,8 +223,19 @@ def monkey_patch(**on):
     It's safe to call monkey_patch multiple times.
     """
     accepted_args = set(('os', 'select', 'socket',
-                         'thread', 'time', 'psycopg', 'MySQLdb', '__builtin__'))
+                         'thread', 'time', 'psycopg', 'MySQLdb',
+                         'builtins'))
+    # To make sure only one of them is passed here
+    assert not ('__builtin__' in on and 'builtins' in on)
+    try:
+        b = on.pop('__builtin__')
+    except KeyError:
+        pass
+    else:
+        on['builtins'] = b
+
     default_on = on.pop("all", None)
+
     for k in six.iterkeys(on):
         if k not in accepted_args:
             raise TypeError("monkey_patch() got an unexpected "
@@ -235,7 +246,7 @@ def monkey_patch(**on):
         if modname == 'MySQLdb':
             # MySQLdb is only on when explicitly patched for the moment
             on.setdefault(modname, False)
-        if modname == '__builtin__':
+        if modname == 'builtins':
             on.setdefault(modname, False)
         on.setdefault(modname, default_on)
 
@@ -258,9 +269,9 @@ def monkey_patch(**on):
     if on.get('MySQLdb') and not already_patched.get('MySQLdb'):
         modules_to_patch += _green_MySQLdb()
         already_patched['MySQLdb'] = True
-    if on.get('__builtin__') and not already_patched.get('__builtin__'):
+    if on.get('builtins') and not already_patched.get('builtins'):
         modules_to_patch += _green_builtins()
-        already_patched['__builtin__'] = True
+        already_patched['builtins'] = True
     if on['psycopg'] and not already_patched.get('psycopg'):
         try:
             from eventlet.support import psycopg2_patcher
@@ -293,6 +304,13 @@ def monkey_patch(**on):
         # importlib must use real thread locks, not eventlet.Semaphore
         importlib._bootstrap._thread = thread
 
+        # Issue #185: Since Python 3.3, threading.RLock is implemented in C and
+        # so call a C function to get the thread identifier, instead of calling
+        # threading.get_ident(). Force the Python implementation of RLock which
+        # calls threading.get_ident() and so is compatible with eventlet.
+        import threading
+        threading.RLock = threading._PyRLock
+
 
 def is_monkey_patched(module):
     """Returns True if the given module is monkeypatched currently, False if
 
 def is_monkey_patched(module):
     """Returns True if the given module is monkeypatched currently, False if
@@ -351,7 +369,7 @@ def _green_MySQLdb():
 def _green_builtins():
     try:
         from eventlet.green import builtin
-        return [('__builtin__', builtin)]
+        return [('__builtin__' if six.PY2 else 'builtins', builtin)]
     except ImportError:
         return []
 
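
Summarising the monkey_patch() changes above: the keyword is now builtins (the old __builtin__ spelling is still translated, but only one of the two may be passed per call), and on Python 3.3+ the pure-Python threading.RLock is forced so re-entrant locks keep using the patched get_ident() (per the Issue #185 comment). A sketch of the call:

    import eventlet

    # Either spelling works after this change; passing both in one call is rejected.
    eventlet.monkey_patch(builtins=True, socket=True, time=True, thread=True)
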
similarity index 90%
rename from eventlet/eventlet/semaphore.py
rename to python-eventlet/eventlet/semaphore.py
index 73dbbc15a32c48057e4d1388115916077f30127c..b2ef9d34f78e7c37b31a8079f392accb40491f54 100644 (file)
@@ -1,4 +1,7 @@
 from __future__ import with_statement
+
+import collections
+
 from eventlet import greenthread
 from eventlet import hubs
 from eventlet.timeout import Timeout
@@ -35,7 +38,7 @@ class Semaphore(object):
         if value < 0:
             raise ValueError("Semaphore must be initialized with a positive "
                              "number, got %s" % value)
-        self._waiters = set()
+        self._waiters = collections.deque()
 
     def __repr__(self):
         params = (self.__class__.__name__, hex(id(self)),
@@ -75,13 +78,25 @@ class Semaphore(object):
         When invoked with blocking set to false, do not block. If a call without
         an argument would block, return false immediately; otherwise, do the
         same thing as when called without arguments, and return true.
+
+        Timeout value must be strictly positive.
         """
         """
-        if not blocking and timeout is not None:
-            raise ValueError("can't specify timeout for non-blocking acquire")
+        if timeout == -1:
+            timeout = None
+        if timeout is not None and timeout < 0:
+            raise ValueError("timeout value must be strictly positive")
+        if not blocking:
+            if timeout is not None:
+                raise ValueError("can't specify timeout for non-blocking acquire")
+            timeout = 0
         if not blocking and self.locked():
             return False
         if not blocking and self.locked():
             return False
-        if self.counter <= 0:
-            self._waiters.add(greenthread.getcurrent())
+
+        current_thread = greenthread.getcurrent()
+
+        if self.counter <= 0 or self._waiters:
+            if current_thread not in self._waiters:
+                self._waiters.append(current_thread)
             try:
                 if timeout is not None:
                     ok = False
             try:
                 if timeout is not None:
                     ok = False
@@ -92,10 +107,19 @@ class Semaphore(object):
                     if not ok:
                         return False
                 else:
                     if not ok:
                         return False
                 else:
-                    while self.counter <= 0:
+                    # If someone else is already in this wait loop, give them
+                    # a chance to get out.
+                    while True:
                         hubs.get_hub().switch()
                         hubs.get_hub().switch()
+                        if self.counter > 0:
+                            break
             finally:
             finally:
-                self._waiters.discard(greenthread.getcurrent())
+                try:
+                    self._waiters.remove(current_thread)
+                except ValueError:
+                    # Fine if its already been dropped.
+                    pass
+
         self.counter -= 1
         return True
 
         self.counter -= 1
         return True
 
@@ -117,7 +141,7 @@ class Semaphore(object):
 
     def _do_acquire(self):
         if self._waiters and self.counter > 0:
 
     def _do_acquire(self):
         if self._waiters and self.counter > 0:
-            waiter = self._waiters.pop()
+            waiter = self._waiters.popleft()
             waiter.switch()
 
     def __exit__(self, typ, val, tb):
             waiter.switch()
 
     def __exit__(self, typ, val, tb):
diff --git a/python-eventlet/eventlet/support/greendns.py b/python-eventlet/eventlet/support/greendns.py
new file mode 100644 (file)
index 0000000..7a23bb4
--- /dev/null
@@ -0,0 +1,723 @@
+'''greendns - non-blocking DNS support for Eventlet
+'''
+
+# Portions of this code taken from the gogreen project:
+#   http://github.com/slideinc/gogreen
+#
+# Copyright (c) 2005-2010 Slide, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of the author nor the names of other
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import struct
+
+from eventlet import patcher
+from eventlet.green import _socket_nodns
+from eventlet.green import os
+from eventlet.green import time
+from eventlet.green import select
+from eventlet.support import six
+
+
+dns = patcher.import_patched('dns',
+                             select=select,
+                             time=time,
+                             os=os,
+                             socket=_socket_nodns)
+dns.resolver = patcher.import_patched('dns.resolver',
+                                      select=select,
+                                      time=time,
+                                      os=os,
+                                      socket=_socket_nodns)
+
+for pkg in ('dns.entropy', 'dns.inet', 'dns.query'):
+    setattr(dns, pkg.split('.')[1], patcher.import_patched(pkg,
+                                                           select=select,
+                                                           time=time,
+                                                           os=os,
+                                                           socket=_socket_nodns))
+import dns.rdtypes
+for pkg in ['dns.rdtypes.IN', 'dns.rdtypes.ANY']:
+    setattr(dns.rdtypes, pkg.split('.')[-1], patcher.import_patched(pkg))
+for pkg in ['dns.rdtypes.IN.A', 'dns.rdtypes.IN.AAAA']:
+    setattr(dns.rdtypes.IN, pkg.split('.')[-1], patcher.import_patched(pkg))
+for pkg in ['dns.rdtypes.ANY.CNAME']:
+    setattr(dns.rdtypes.ANY, pkg.split('.')[-1], patcher.import_patched(pkg))
+
+
+socket = _socket_nodns
+
+DNS_QUERY_TIMEOUT = 10.0
+HOSTS_TTL = 10.0
+
+EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out')
+EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname')
+EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known')
+
+
+def is_ipv4_addr(host):
+    """Return True if host is a valid IPv4 address"""
+    if not isinstance(host, six.string_types):
+        return False
+    try:
+        dns.ipv4.inet_aton(host)
+    except dns.exception.SyntaxError:
+        return False
+    else:
+        return True
+
+
+def is_ipv6_addr(host):
+    """Return True if host is a valid IPv6 address"""
+    if not isinstance(host, six.string_types):
+        return False
+    try:
+        dns.ipv6.inet_aton(host)
+    except dns.exception.SyntaxError:
+        return False
+    else:
+        return True
+
+
+def is_ip_addr(host):
+    """Return True if host is a valid IPv4 or IPv6 address"""
+    return is_ipv4_addr(host) or is_ipv6_addr(host)
+
+
+class HostsAnswer(dns.resolver.Answer):
+    """Answer class for HostsResolver object"""
+
+    def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
+        """Create a new answer
+
+        :qname: A dns.name.Name instance of the query name
+        :rdtype: The rdatatype of the query
+        :rdclass: The rdataclass of the query
+        :rrset: The dns.rrset.RRset with the response, must have ttl attribute
+        :raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no
+           answer.
+        """
+        self.response = None
+        self.qname = qname
+        self.rdtype = rdtype
+        self.rdclass = rdclass
+        self.canonical_name = qname
+        if not rrset and raise_on_no_answer:
+            raise dns.resolver.NoAnswer()
+        self.rrset = rrset
+        self.expiration = (time.time() +
+                           rrset.ttl if hasattr(rrset, 'ttl') else 0)
+
+
+class HostsResolver(object):
+    """Class to parse the hosts file
+
+    Attributes
+    ----------
+
+    :fname: The filename of the hosts file in use.
+    :interval: The time between checking for hosts file modification
+    """
+
+    def __init__(self, fname=None, interval=HOSTS_TTL):
+        self._v4 = {}           # name -> ipv4
+        self._v6 = {}           # name -> ipv6
+        self._aliases = {}      # name -> canonical_name
+        self.interval = interval
+        self.fname = fname
+        if fname is None:
+            if os.name == 'posix':
+                self.fname = '/etc/hosts'
+            elif os.name == 'nt':
+                self.fname = os.path.expandvars(
+                    r'%SystemRoot%\system32\drivers\etc\hosts')
+        self._last_load = 0
+        if self.fname:
+            self._load()
+
+    def _readlines(self):
+        """Read the contents of the hosts file
+
+        Return list of lines, comment lines and empty lines are
+        excluded.
+
+        Note that this performs disk I/O so can be blocking.
+        """
+        lines = []
+        try:
+            with open(self.fname, 'rU') as fp:
+                for line in fp:
+                    line = line.strip()
+                    if line and line[0] != '#':
+                        lines.append(line)
+        except (IOError, OSError):
+            pass
+        return lines
+
+    def _load(self):
+        """Load hosts file
+
+        This will unconditionally (re)load the data from the hosts
+        file.
+        """
+        lines = self._readlines()
+        self._v4.clear()
+        self._v6.clear()
+        self._aliases.clear()
+        for line in lines:
+            parts = line.split()
+            if len(parts) < 2:
+                continue
+            ip = parts.pop(0)
+            if is_ipv4_addr(ip):
+                ipmap = self._v4
+            elif is_ipv6_addr(ip):
+                if ip.startswith('fe80'):
+                    # Do not use link-local addresses, OSX stores these here
+                    continue
+                ipmap = self._v6
+            else:
+                continue
+            cname = parts.pop(0)
+            ipmap[cname] = ip
+            for alias in parts:
+                ipmap[alias] = ip
+                self._aliases[alias] = cname
+        self._last_load = time.time()
+
+    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+              tcp=False, source=None, raise_on_no_answer=True):
+        """Query the hosts file
+
+        The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
+        dns.rdatatype.CNAME.
+
+        The ``rdclass`` parameter must be dns.rdataclass.IN while the
+        ``tcp`` and ``source`` parameters are ignored.
+
+        Return a HostAnswer instance or raise a dns.resolver.NoAnswer
+        exception.
+        """
+        now = time.time()
+        if self._last_load + self.interval < now:
+            self._load()
+        rdclass = dns.rdataclass.IN
+        if isinstance(qname, six.string_types):
+            name = qname
+            qname = dns.name.from_text(qname)
+        else:
+            name = str(qname)
+        rrset = dns.rrset.RRset(qname, rdclass, rdtype)
+        rrset.ttl = self._last_load + self.interval - now
+        if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A:
+            addr = self._v4.get(name)
+            if not addr and qname.is_absolute():
+                addr = self._v4.get(name[:-1])
+            if addr:
+                rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr))
+        elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA:
+            addr = self._v6.get(name)
+            if not addr and qname.is_absolute():
+                addr = self._v6.get(name[:-1])
+            if addr:
+                rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr))
+        elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME:
+            cname = self._aliases.get(name)
+            if not cname and qname.is_absolute():
+                cname = self._aliases.get(name[:-1])
+            if cname:
+                rrset.add(dns.rdtypes.ANY.CNAME.CNAME(
+                    rdclass, rdtype, dns.name.from_text(cname)))
+        return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)
+
+    def getaliases(self, hostname):
+        """Return a list of all the aliases of a given cname"""
+        # Due to the way we store aliases this is a bit inefficient; this
+        # clearly was an afterthought.  But this is only used by
+        # gethostbyname_ex so it's probably fine.
+        aliases = []
+        if hostname in self._aliases:
+            cannon = self._aliases[hostname]
+        else:
+            cannon = hostname
+        aliases.append(cannon)
+        for alias, cname in six.iteritems(self._aliases):
+            if cannon == cname:
+                aliases.append(alias)
+        aliases.remove(hostname)
+        return aliases
+
+
+class ResolverProxy(object):
+    """Resolver class which can also use /etc/hosts
+
+    Initialise with a HostsResolver instance in order for it to also
+    use the hosts file.
+    """
+
+    def __init__(self, hosts_resolver=None, filename='/etc/resolv.conf'):
+        """Initialise the resolver proxy
+
+        :param hosts_resolver: An instance of HostsResolver to use.
+
+        :param filename: The filename containing the resolver
+           configuration.  The default value is correct for both UNIX
+           and Windows, on Windows it will result in the configuration
+           being read from the Windows registry.
+        """
+        self._hosts = hosts_resolver
+        self._filename = filename
+        self._resolver = dns.resolver.Resolver(filename=self._filename)
+        self._resolver.cache = dns.resolver.LRUCache()
+
+    def clear(self):
+        self._resolver = dns.resolver.Resolver(filename=self._filename)
+        self._resolver.cache = dns.resolver.Cache()
+
+    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+              tcp=False, source=None, raise_on_no_answer=True):
+        """Query the resolver, using /etc/hosts if enabled"""
+        if qname is None:
+            qname = '0.0.0.0'
+        if rdclass == dns.rdataclass.IN and self._hosts:
+            try:
+                return self._hosts.query(qname, rdtype)
+            except dns.resolver.NoAnswer:
+                pass
+        return self._resolver.query(qname, rdtype, rdclass,
+                                    tcp, source, raise_on_no_answer)
+
+    def getaliases(self, hostname):
+        """Return a list of all the aliases of a given hostname"""
+        if self._hosts:
+            aliases = self._hosts.getaliases(hostname)
+        else:
+            aliases = []
+        while True:
+            try:
+                ans = self._resolver.query(hostname, dns.rdatatype.CNAME)
+            except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
+                break
+            else:
+                aliases.extend(str(rr.target) for rr in ans.rrset)
+                hostname = ans[0].target
+        return aliases
+
+
+resolver = ResolverProxy(hosts_resolver=HostsResolver())
+
+
+def resolve(name, family=socket.AF_INET, raises=True):
+    """Resolve a name for a given family using the global resolver proxy
+
+    This method is called by the global getaddrinfo() function.
+
+    Return a dns.resolver.Answer instance.  If there is no answer its
+    rrset will be empty.
+    """
+    if family == socket.AF_INET:
+        rdtype = dns.rdatatype.A
+    elif family == socket.AF_INET6:
+        rdtype = dns.rdatatype.AAAA
+    else:
+        raise socket.gaierror(socket.EAI_FAMILY,
+                              'Address family not supported')
+    try:
+        try:
+            return resolver.query(name, rdtype, raise_on_no_answer=raises)
+        except dns.resolver.NXDOMAIN:
+            if not raises:
+                return HostsAnswer(dns.name.Name(name),
+                                   rdtype, dns.rdataclass.IN, None, False)
+            raise
+    except dns.exception.Timeout:
+        raise EAI_EAGAIN_ERROR
+    except dns.exception.DNSException:
+        raise EAI_NODATA_ERROR
+
+
+def resolve_cname(host):
+    """Return the canonical name of a hostname"""
+    try:
+        ans = resolver.query(host, dns.rdatatype.CNAME)
+    except dns.resolver.NoAnswer:
+        return host
+    except dns.exception.Timeout:
+        raise EAI_EAGAIN_ERROR
+    except dns.exception.DNSException:
+        raise EAI_NODATA_ERROR
+    else:
+        return str(ans[0].target)
+
+
+def getaliases(host):
+    """Return a list of for aliases for the given hostname
+
+    This method does translate the dnspython exceptions into
+    socket.gaierror exceptions.  If no aliases are available an empty
+    list will be returned.
+    """
+    try:
+        return resolver.getaliases(host)
+    except dns.exception.Timeout:
+        raise EAI_EAGAIN_ERROR
+    except dns.exception.DNSException:
+        raise EAI_NODATA_ERROR
+
+
+def _getaddrinfo_lookup(host, family, flags):
+    """Resolve a hostname to a list of addresses
+
+    Helper function for getaddrinfo.
+    """
+    if flags & socket.AI_NUMERICHOST:
+        raise EAI_NONAME_ERROR
+    addrs = []
+    if family == socket.AF_UNSPEC:
+        for qfamily in [socket.AF_INET6, socket.AF_INET]:
+            answer = resolve(host, qfamily, False)
+            if answer.rrset:
+                addrs.extend([rr.address for rr in answer.rrset])
+    elif family == socket.AF_INET6 and flags & socket.AI_V4MAPPED:
+        answer = resolve(host, socket.AF_INET6, False)
+        if answer.rrset:
+            addrs = [rr.address for rr in answer.rrset]
+        if not addrs or flags & socket.AI_ALL:
+            answer = resolve(host, socket.AF_INET, False)
+            if answer.rrset:
+                addrs = ['::ffff:' + rr.address for rr in answer.rrset]
+    else:
+        answer = resolve(host, family, False)
+        if answer.rrset:
+            addrs = [rr.address for rr in answer.rrset]
+    return str(answer.qname), addrs
+
+
+def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
+    """Replacement for Python's socket.getaddrinfo
+
+    This does the A and AAAA lookups asynchronously after which it
+    calls the OS' getaddrinfo(3) using the AI_NUMERICHOST flag.  This
+    flag ensures getaddrinfo(3) does not use the network itself and
+    allows us to respect all the other arguments like the native OS.
+    """
+    if isinstance(host, six.string_types):
+        host = host.encode('idna').decode('ascii')
+    if host is not None and not is_ip_addr(host):
+        qname, addrs = _getaddrinfo_lookup(host, family, flags)
+    else:
+        qname = host
+        addrs = [host]
+    aiflags = (flags | socket.AI_NUMERICHOST) & (0xffff ^ socket.AI_CANONNAME)
+    res = []
+    err = None
+    for addr in addrs:
+        try:
+            ai = socket.getaddrinfo(addr, port, family,
+                                    socktype, proto, aiflags)
+        except socket.error as e:
+            if flags & socket.AI_ADDRCONFIG:
+                err = e
+                continue
+            raise
+        res.extend(ai)
+    if not res:
+        if err:
+            raise err
+        raise socket.gaierror(socket.EAI_NONAME, 'No address found')
+    if flags & socket.AI_CANONNAME:
+        if not is_ip_addr(qname):
+            qname = resolve_cname(qname).encode('ascii').decode('idna')
+        ai = res[0]
+        res[0] = (ai[0], ai[1], ai[2], qname, ai[4])
+    return res
+
+
+def gethostbyname(hostname):
+    """Replacement for Python's socket.gethostbyname"""
+    if is_ipv4_addr(hostname):
+        return hostname
+    rrset = resolve(hostname)
+    return rrset[0].address
+
+
+def gethostbyname_ex(hostname):
+    """Replacement for Python's socket.gethostbyname_ex"""
+    if is_ipv4_addr(hostname):
+        return (hostname, [], [hostname])
+    ans = resolve(hostname)
+    aliases = getaliases(hostname)
+    addrs = [rr.address for rr in ans.rrset]
+    qname = str(ans.qname)
+    if qname[-1] == '.':
+        qname = qname[:-1]
+    return (qname, aliases, addrs)
+
+
+def getnameinfo(sockaddr, flags):
+    """Replacement for Python's socket.getnameinfo.
+
+    Currently only supports IPv4.
+    """
+    try:
+        host, port = sockaddr
+    except (ValueError, TypeError):
+        if not isinstance(sockaddr, tuple):
+            del sockaddr  # to pass a stdlib test that is
+            # hyper-careful about reference counts
+            raise TypeError('getnameinfo() argument 1 must be a tuple')
+        else:
+            # must be ipv6 sockaddr, pretending we don't know how to resolve it
+            raise EAI_NONAME_ERROR
+
+    if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST):
+        # Conflicting flags.  Punt.
+        raise EAI_NONAME_ERROR
+
+    if is_ipv4_addr(host):
+        try:
+            rrset = resolver.query(
+                dns.reversename.from_address(host), dns.rdatatype.PTR)
+            if len(rrset) > 1:
+                raise socket.error('sockaddr resolved to multiple addresses')
+            host = rrset[0].target.to_text(omit_final_dot=True)
+        except dns.exception.Timeout:
+            if flags & socket.NI_NAMEREQD:
+                raise EAI_EAGAIN_ERROR
+        except dns.exception.DNSException:
+            if flags & socket.NI_NAMEREQD:
+                raise EAI_NONAME_ERROR
+    else:
+        try:
+            rrset = resolver.query(host)
+            if len(rrset) > 1:
+                raise socket.error('sockaddr resolved to multiple addresses')
+            if flags & socket.NI_NUMERICHOST:
+                host = rrset[0].address
+        except dns.exception.Timeout:
+            raise EAI_EAGAIN_ERROR
+        except dns.exception.DNSException:
+            raise socket.gaierror(
+                (socket.EAI_NODATA, 'No address associated with hostname'))
+
+        if not (flags & socket.NI_NUMERICSERV):
+            proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp'
+            port = socket.getservbyport(port, proto)
+
+    return (host, port)
+
+
+def _net_read(sock, count, expiration):
+    """coro friendly replacement for dns.query._net_write
+    Read the specified number of bytes from sock.  Keep trying until we
+    either get the desired amount, or we hit EOF.
+    A Timeout exception will be raised if the operation is not completed
+    by the expiration time.
+    """
+    s = ''
+    while count > 0:
+        try:
+            n = sock.recv(count)
+        except socket.timeout:
+            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+            if expiration - time.time() <= 0.0:
+                raise dns.exception.Timeout
+        if n == '':
+            raise EOFError
+        count = count - len(n)
+        s = s + n
+    return s
+
+
+def _net_write(sock, data, expiration):
+    """coro friendly replacement for dns.query._net_write
+    Write the specified data to the socket.
+    A Timeout exception will be raised if the operation is not completed
+    by the expiration time.
+    """
+    current = 0
+    l = len(data)
+    while current < l:
+        try:
+            current += sock.send(data[current:])
+        except socket.timeout:
+            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+            if expiration - time.time() <= 0.0:
+                raise dns.exception.Timeout
+
+
+def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
+        af=None, source=None, source_port=0, ignore_unexpected=False):
+    """coro friendly replacement for dns.query.udp
+    Return the response obtained after sending a query via UDP.
+
+    @param q: the query
+    @type q: dns.message.Message
+    @param where: where to send the message
+    @type where: string containing an IPv4 or IPv6 address
+    @param timeout: The number of seconds to wait before the query times out.
+    If None, the default, wait forever.
+    @type timeout: float
+    @param port: The port to which to send the message.  The default is 53.
+    @type port: int
+    @param af: the address family to use.  The default is None, which
+    causes the address family to use to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+    @type af: int
+    @rtype: dns.message.Message object
+    @param source: source address.  The default is the IPv4 wildcard address.
+    @type source: string
+    @param source_port: The port from which to send the message.
+    The default is 0.
+    @type source_port: int
+    @param ignore_unexpected: If True, ignore responses from unexpected
+    sources.  The default is False.
+    @type ignore_unexpected: bool"""
+
+    wire = q.to_wire()
+    if af is None:
+        try:
+            af = dns.inet.af_for_address(where)
+        except:
+            af = dns.inet.AF_INET
+    if af == dns.inet.AF_INET:
+        destination = (where, port)
+        if source is not None:
+            source = (source, source_port)
+    elif af == dns.inet.AF_INET6:
+        destination = (where, port, 0, 0)
+        if source is not None:
+            source = (source, source_port, 0, 0)
+
+    s = socket.socket(af, socket.SOCK_DGRAM)
+    s.settimeout(timeout)
+    try:
+        expiration = dns.query._compute_expiration(timeout)
+        if source is not None:
+            s.bind(source)
+        try:
+            s.sendto(wire, destination)
+        except socket.timeout:
+            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+            if expiration - time.time() <= 0.0:
+                raise dns.exception.Timeout
+        while 1:
+            try:
+                (wire, from_address) = s.recvfrom(65535)
+            except socket.timeout:
+                # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+                if expiration - time.time() <= 0.0:
+                    raise dns.exception.Timeout
+            if from_address == destination:
+                break
+            if not ignore_unexpected:
+                raise dns.query.UnexpectedSource(
+                    'got a response from %s instead of %s'
+                    % (from_address, destination))
+    finally:
+        s.close()
+
+    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac)
+    if not q.is_response(r):
+        raise dns.query.BadResponse()
+    return r
+
+
+def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
+        af=None, source=None, source_port=0):
+    """coro friendly replacement for dns.query.tcp
+    Return the response obtained after sending a query via TCP.
+
+    @param q: the query
+    @type q: dns.message.Message object
+    @param where: where to send the message
+    @type where: string containing an IPv4 or IPv6 address
+    @param timeout: The number of seconds to wait before the query times out.
+    If None, the default, wait forever.
+    @type timeout: float
+    @param port: The port to which to send the message.  The default is 53.
+    @type port: int
+    @param af: the address family to use.  The default is None, which
+    causes the address family to use to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+    @type af: int
+    @rtype: dns.message.Message object
+    @param source: source address.  The default is the IPv4 wildcard address.
+    @type source: string
+    @param source_port: The port from which to send the message.
+    The default is 0.
+    @type source_port: int"""
+
+    wire = q.to_wire()
+    if af is None:
+        try:
+            af = dns.inet.af_for_address(where)
+        except:
+            af = dns.inet.AF_INET
+    if af == dns.inet.AF_INET:
+        destination = (where, port)
+        if source is not None:
+            source = (source, source_port)
+    elif af == dns.inet.AF_INET6:
+        destination = (where, port, 0, 0)
+        if source is not None:
+            source = (source, source_port, 0, 0)
+    s = socket.socket(af, socket.SOCK_STREAM)
+    s.settimeout(timeout)
+    try:
+        expiration = dns.query._compute_expiration(timeout)
+        if source is not None:
+            s.bind(source)
+        try:
+            s.connect(destination)
+        except socket.timeout:
+            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+            if expiration - time.time() <= 0.0:
+                raise dns.exception.Timeout
+
+        l = len(wire)
+        # copying the wire into tcpmsg is inefficient, but lets us
+        # avoid writev() or doing a short write that would get pushed
+        # onto the net
+        tcpmsg = struct.pack("!H", l) + wire
+        _net_write(s, tcpmsg, expiration)
+        ldata = _net_read(s, 2, expiration)
+        (l,) = struct.unpack("!H", ldata)
+        wire = _net_read(s, l, expiration)
+    finally:
+        s.close()
+    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac)
+    if not q.is_response(r):
+        raise dns.query.BadResponse()
+    return r
+
+
+def reset():
+    resolver.clear()
+
+# Install our coro-friendly replacements for the tcp and udp query methods.
+dns.query.tcp = tcp
+dns.query.udp = udp
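
Taken together, the module above provides green (non-blocking) replacements for gethostbyname/getaddrinfo backed by dnspython. A hedged usage sketch, assuming dnspython is installed; the host names are made up for illustration:

    import eventlet
    from eventlet.support import greendns

    def lookup(name):
        # Resolution goes through dnspython on green sockets, so many
        # lookups can be in flight at once without blocking the hub.
        return greendns.gethostbyname(name)

    names = ['localhost', 'example.com']          # illustrative hostnames
    pool = eventlet.GreenPool()
    for name, addr in zip(names, pool.imap(lookup, names)):
        print('%s -> %s' % (name, addr))
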
diff --git a/python-eventlet/eventlet/support/greenlets.py b/python-eventlet/eventlet/support/greenlets.py
new file mode 100644 (file)
index 0000000..d4e1793
--- /dev/null
@@ -0,0 +1,8 @@
+import distutils.version
+
+import greenlet
+getcurrent = greenlet.greenlet.getcurrent
+GreenletExit = greenlet.greenlet.GreenletExit
+preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__)
+                     >= distutils.version.LooseVersion('0.3.2'))
+greenlet = greenlet.greenlet
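
This small compatibility shim is how the rest of eventlet imports greenlet primitives. A sketch of the intended import pattern; the helper function is invented for illustration:

    from eventlet.support import greenlets as greenlet

    def current_is_main():
        # getcurrent()/GreenletExit/greenlet are the names re-exported above;
        # the main greenlet is the only one without a parent.
        return greenlet.getcurrent().parent is None

    print(current_is_main())
    print(greenlet.preserves_excinfo)   # True for greenlet >= 0.3.2
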
similarity index 97%
rename from eventlet/eventlet/tpool.py
rename to python-eventlet/eventlet/tpool.py
index e7f0db165c2905f32189f3c222840f56dab95c65..8d73814d437b6586c9eee218e793f2e7d724b99e 100644 (file)
@@ -292,6 +292,16 @@ def killall():
     for thr in _threads:
         thr.join()
     del _threads[:]
+
+    # return any remaining results
+    while not _rspq.empty():
+        try:
+            (e, rv) = _rspq.get(block=False)
+            e.send(rv)
+            e = rv = None
+        except Empty:
+            pass
+
     if _coro is not None:
         greenthread.kill(_coro)
     _rsock.close()
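
The added loop drains any responses still sitting in the internal _rspq before the pool shuts down, so callers of tpool.execute() are not left waiting. A hedged sketch of the public API this protects; the workload function is invented:

    import time
    from eventlet import tpool

    def blocking_work(n):
        time.sleep(0.01)        # real thread sleep; would otherwise block the hub
        return n * n

    results = [tpool.execute(blocking_work, i) for i in range(5)]
    print(results)              # [0, 1, 4, 9, 16], each computed in an OS thread
    tpool.killall()             # with the hunk above, pending results get flushed
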
similarity index 94%
rename from eventlet/eventlet/wsgi.py
rename to python-eventlet/eventlet/wsgi.py
index e69d107d49e69d593125c5c83cc22542a3312264..72582770ae9edc015ea7580a5035eb7e5d1b591e 100644 (file)
@@ -8,12 +8,13 @@ import warnings
 
 from eventlet.green import BaseHTTPServer
 from eventlet.green import socket
-from eventlet.green import urllib
 from eventlet import greenio
 from eventlet import greenpool
 from eventlet import support
 from eventlet.support import six
 
+from eventlet.support.six.moves import urllib
+
 
 DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
 DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
@@ -224,6 +225,33 @@ class HeadersTooLarge(Exception):
     pass
 
 
+def get_logger(log, debug):
+    if callable(getattr(log, 'info', None)) \
+       and callable(getattr(log, 'debug', None)):
+        return log
+    else:
+        return LoggerFileWrapper(log, debug)
+
+
+class LoggerFileWrapper(object):
+    def __init__(self, log, debug):
+        self.log = log
+        self._debug = debug
+
+    def info(self, msg, *args, **kwargs):
+        self.write(msg, *args)
+
+    def debug(self, msg, *args, **kwargs):
+        if self._debug:
+            self.write(msg, *args)
+
+    def write(self, msg, *args):
+        msg = msg + '\n'
+        if args:
+            msg = msg % args
+        self.log.write(msg)
+
+
 class FileObjectForHeaders(object):
 
     def __init__(self, fp):
@@ -395,24 +423,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n")
             else:
                 towrite.append(data)
-            try:
-                _writelines(towrite)
-                length[0] = length[0] + sum(map(len, towrite))
-            except UnicodeEncodeError:
-                self.server.log_message(
-                    "Encountered non-ascii unicode while attempting to write"
-                    "wsgi response: %r" %
-                    [x for x in towrite if isinstance(x, six.text_type)])
-                self.server.log_message(traceback.format_exc())
-                _writelines(
-                    ["HTTP/1.1 500 Internal Server Error\r\n",
-                     "Connection: close\r\n",
-                     "Content-type: text/plain\r\n",
-                     "Content-length: 98\r\n",
-                     "Date: %s\r\n" % format_date_time(time.time()),
-                     "\r\n",
-                     ("Internal Server Error: wsgi application passed "
-                      "a unicode object to the server instead of a string.")])
+            _writelines(towrite)
+            length[0] = length[0] + sum(map(len, towrite))
 
         def start_response(status, response_headers, exc_info=None):
             status_code[0] = status.split()[0]
@@ -456,6 +468,9 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 minimum_write_chunk_size = int(self.environ.get(
                     'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
                 for data in result:
+                    if isinstance(data, six.text_type):
+                        data = data.encode('ascii')
+
                     towrite.append(data)
                     towrite_size += len(data)
                     if towrite_size >= minimum_write_chunk_size:
@@ -471,8 +486,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
             except Exception:
                 self.close_connection = 1
                 tb = traceback.format_exc()
-                self.server.log_message(tb)
-                if not headers_set:
+                self.server.log.info(tb)
+                if not headers_sent:
                     err_body = six.b(tb) if self.server.debug else b''
                     start_response("500 Internal Server Error",
                                    [('Content-type', 'text/plain'),
@@ -497,7 +512,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
                 hook(self.environ, *args, **kwargs)
 
             if self.server.log_output:
-                self.server.log_message(self.server.log_format % {
+                self.server.log.info(self.server.log_format % {
                     'client_ip': self.get_client_ip(),
                     'client_port': self.client_address[1],
                     'date_time': self.log_date_time_string(),
@@ -522,7 +537,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
 
         pq = self.path.split('?', 1)
         env['RAW_PATH_INFO'] = pq[0]
-        env['PATH_INFO'] = urllib.unquote(pq[0])
+        env['PATH_INFO'] = urllib.parse.unquote(pq[0])
         if len(pq) > 1:
             env['QUERY_STRING'] = pq[1]
 
@@ -616,9 +631,9 @@ class Server(BaseHTTPServer.HTTPServer):
         self.socket = socket
         self.address = address
         if log:
-            self.log = log
+            self.log = get_logger(log, debug)
         else:
-            self.log = sys.stderr
+            self.log = get_logger(sys.stderr, debug)
         self.app = app
         self.keepalive = keepalive
         self.environ = environ
@@ -672,12 +687,12 @@ class Server(BaseHTTPServer.HTTPServer):
         except socket.timeout:
             # Expected exceptions are not exceptional
             sock.close()
-            if self.debug:
-                # similar to logging "accepted" in server()
-                self.log_message('(%s) timed out %r' % (self.pid, address))
+            # similar to logging "accepted" in server()
+            self.log.debug('(%s) timed out %r' % (self.pid, address))
 
     def log_message(self, message):
-        self.log.write(message + '\n')
+        warnings.warn('server.log_message is deprecated.  Please use server.log.info instead')
+        self.log.info(message)
 
 
 try:
@@ -801,15 +816,14 @@ def server(sock, site,
             if port == ':80':
                 port = ''
 
-        serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (
+        serv.log.info("(%s) wsgi starting up on %s://%s%s/" % (
             serv.pid, scheme, host, port))
         while is_accepting:
             try:
                 client_socket = sock.accept()
                 client_socket[0].settimeout(serv.socket_timeout)
-                if debug:
-                    serv.log.write("(%s) accepted %r\n" % (
-                        serv.pid, client_socket[1]))
+                serv.log.debug("(%s) accepted %r" % (
+                    serv.pid, client_socket[1]))
                 try:
                     pool.spawn_n(serv.process_request, client_socket)
                 except AttributeError:
@@ -822,11 +836,11 @@ def server(sock, site,
                 if support.get_errno(e) not in ACCEPT_ERRNO:
                     raise
             except (KeyboardInterrupt, SystemExit):
-                serv.log.write("wsgi exiting\n")
+                serv.log.info("wsgi exiting")
                 break
     finally:
         pool.waitall()
-        serv.log.write("(%s) wsgi exited, is_accepting=%s\n" % (
+        serv.log.info("(%s) wsgi exited, is_accepting=%s" % (
             serv.pid, is_accepting))
         try:
             # NOTE: It's not clear whether we want this to leave the
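
With get_logger()/LoggerFileWrapper in place, the wsgi server's log argument accepts either a logger-like object (anything with callable info() and debug() methods) or, as before, a plain file-like object that gets wrapped. A hedged sketch; the application and port below are invented for illustration:

    import logging
    import eventlet
    from eventlet import wsgi

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello\n']

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('my_wsgi')   # has info()/debug(), so it is used as-is

    # debug=True routes "accepted"/"timed out" messages through log.debug();
    # passing sys.stderr instead would be wrapped by LoggerFileWrapper.
    wsgi.server(eventlet.listen(('127.0.0.1', 8090)), app, log=logger, debug=True)
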
diff --git a/python-eventlet/setup.cfg b/python-eventlet/setup.cfg
new file mode 100644 (file)
index 0000000..e57d130
--- /dev/null
@@ -0,0 +1,2 @@
+[wheel]
+universal = True
similarity index 86%
rename from eventlet/setup.py
rename to python-eventlet/setup.py
index 7a601d5249cfa3fec4cdc4630585b42a5f012e41..4ccc966e105172aa9a3830043e0bc23f9e0f31f4 100644 (file)
@@ -11,7 +11,7 @@ setup(
     author='Linden Lab',
     author_email='eventletdev@lists.secondlife.com',
     url='http://eventlet.net',
-    packages=find_packages(exclude=['tests', 'benchmarks']),
+    packages=find_packages(exclude=['benchmarks', 'tests', 'tests.*']),
     install_requires=(
         'greenlet >= 0.3',
     ),
@@ -31,6 +31,8 @@ setup(
         "Operating System :: Microsoft :: Windows",
         "Programming Language :: Python :: 2.6",
         "Programming Language :: Python :: 2.7",
         "Operating System :: Microsoft :: Windows",
         "Programming Language :: Python :: 2.6",
         "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
         "Topic :: Internet",
         "Topic :: Software Development :: Libraries :: Python Modules",
         "Intended Audience :: Developers",
         "Topic :: Internet",
         "Topic :: Software Development :: Libraries :: Python Modules",
         "Intended Audience :: Developers",
diff --git a/python-eventlet/tests/README b/python-eventlet/tests/README
new file mode 100644 (file)
index 0000000..4e28ae3
--- /dev/null
@@ -0,0 +1,7 @@
+The tests are intended to be run using Nose.  
+http://somethingaboutorange.com/mrl/projects/nose/
+
+To run tests, simply install nose, and then, in the eventlet tree, do:
+   $ nosetests
+
+That's it!  Its output is the same as unittest's output.  It tends to emit a lot of tracebacks from various poorly-behaving tests, but they still (generally) pass.
\ No newline at end of file
similarity index 82%
rename from eventlet/tests/__init__.py
rename to python-eventlet/tests/__init__.py
index 3dba2420dea1f4787d243bf0e37b2e84fa7d32ad..26c0c2ec5ab49c5102314dcb7991e9dece9b0bf1 100644 (file)
@@ -1,8 +1,11 @@
 # package is named tests, not test, so it won't be confused with test in stdlib
 from __future__ import print_function
 
+import contextlib
 import errno
+import functools
 import gc
+import json
 import os
 try:
     import resource
@@ -14,6 +17,8 @@ import sys
 import unittest
 import warnings
 
+from nose.plugins.skip import SkipTest
+
 import eventlet
 from eventlet import tpool
 
@@ -22,22 +27,29 @@ from eventlet import tpool
 main = unittest.main
 
 
-def skipped(func):
-    """ Decorator that marks a function as skipped.  Uses nose's SkipTest exception
-    if installed.  Without nose, this will count skipped tests as passing tests."""
+@contextlib.contextmanager
+def assert_raises(exc_type):
     try:
-        from nose.plugins.skip import SkipTest
+        yield
+    except exc_type:
+        pass
+    else:
+        name = str(exc_type)
+        try:
+            name = exc_type.__name__
+        except AttributeError:
+            pass
+        assert False, 'Expected exception {0}'.format(name)
 
 
-        def skipme(*a, **k):
-            raise SkipTest()
-        skipme.__name__ = func.__name__
-        return skipme
-    except ImportError:
-        # no nose, we'll just skip the test ourselves
-        def skipme(*a, **k):
-            print(("Skipping {0}".format(func.__name__)))
-        skipme.__name__ = func.__name__
-        return skipme
+
+def skipped(func, *decorator_args):
+    """Decorator that marks a function as skipped.
+    """
+    @functools.wraps(func)
+    def wrapped(*a, **k):
+        raise SkipTest(*decorator_args)
+
+    return wrapped
 
 
 def skip_if(condition):
@@ -47,16 +59,16 @@ def skip_if(condition):
     should return True to skip the test.
     """
     def skipped_wrapper(func):
+        @functools.wraps(func)
         def wrapped(*a, **kw):
             if isinstance(condition, bool):
                 result = condition
             else:
                 result = condition(func)
             if result:
-                return skipped(func)(*a, **kw)
+                raise SkipTest()
             else:
                 return func(*a, **kw)
-        wrapped.__name__ = func.__name__
         return wrapped
     return skipped_wrapper
 
@@ -68,16 +80,16 @@ def skip_unless(condition):
     should return True if the condition is satisfied.
     """
     def skipped_wrapper(func):
+        @functools.wraps(func)
         def wrapped(*a, **kw):
             if isinstance(condition, bool):
                 result = condition
             else:
                 result = condition(func)
             if not result:
-                return skipped(func)(*a, **kw)
+                raise SkipTest()
             else:
                 return func(*a, **kw)
-        wrapped.__name__ = func.__name__
         return wrapped
     return skipped_wrapper
 
@@ -204,13 +216,23 @@ def check_idle_cpu_usage(duration, allowed_part):
 
 
 def verify_hub_empty():
+
+    def format_listener(listener):
+        return 'Listener %r for greenlet %r with run callback %r' % (
+            listener, listener.greenlet, getattr(listener.greenlet, 'run', None))
+
     from eventlet import hubs
     hub = hubs.get_hub()
-    num_readers = len(hub.get_readers())
-    num_writers = len(hub.get_writers())
+    readers = hub.get_readers()
+    writers = hub.get_writers()
+    num_readers = len(readers)
+    num_writers = len(writers)
     num_timers = hub.get_timers_count()
-    assert num_readers == 0 and num_writers == 0, "Readers: %s Writers: %s" % (
-        num_readers, num_writers)
+    assert num_readers == 0 and num_writers == 0, \
+        "Readers: %s (%d) Writers: %s (%d)" % (
+            ', '.join(map(format_listener, readers)), num_readers,
+            ', '.join(map(format_listener, writers)), num_writers,
+        )
 
 
 def find_command(command):
@@ -245,19 +267,10 @@ def get_database_auth():
     ".test_dbauth", which contains a json map of parameters to the
     connect function.
     """
     ".test_dbauth", which contains a json map of parameters to the
     connect function.
     """
-    import os
     retval = {
         'MySQLdb': {'host': 'localhost', 'user': 'root', 'passwd': ''},
         'psycopg2': {'user': 'test'},
     }
-    try:
-        import json
-    except ImportError:
-        try:
-            import simplejson as json
-        except ImportError:
-            print("No json implementation, using baked-in db credentials.")
-            return retval
 
     if 'EVENTLET_DB_TEST_AUTH' in os.environ:
         return json.loads(os.environ.get('EVENTLET_DB_TEST_AUTH'))
@@ -268,7 +281,7 @@ def get_database_auth():
         try:
             auth_utf8 = json.load(open(f))
             # Have to convert unicode objects to str objects because
-            # mysqldb is dum. Using a doubly-nested list comprehension
+            # mysqldb is dumb. Using a doubly-nested list comprehension
             # because we know that the structure is a two-level dict.
             return dict(
                 [(str(modname), dict(
@@ -283,9 +296,9 @@ def run_python(path):
     if not path.endswith('.py'):
         path += '.py'
     path = os.path.abspath(path)
-    dir_ = os.path.dirname(path)
+    src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
     new_env = os.environ.copy()
-    new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [dir_])
+    new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [src_dir])
     p = subprocess.Popen(
         [sys.executable, path],
         env=new_env,
@@ -297,5 +310,16 @@ def run_python(path):
     return output
 
 
+def run_isolated(path, prefix='tests/isolated/'):
+    output = run_python(prefix + path).rstrip()
+    if output.startswith(b'skip'):
+        parts = output.split(b':', 1)
+        skip_args = []
+        if len(parts) > 1:
+            skip_args.append(parts[1])
+        raise SkipTest(*skip_args)
+    assert output == b'pass', output
+
+
 certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
 private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
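
The reworked helpers above raise nose's SkipTest instead of silently passing. A hedged sketch of how a test module might use them; the condition function and test class are invented for illustration:

    import tests

    def has_dnspython(_func):
        # Condition callables receive the test function and return a bool.
        try:
            import dns  # noqa: F401 (dnspython)
            return True
        except ImportError:
            return False

    class ExampleTest(tests.LimitedTestCase):

        @tests.skip_unless(has_dnspython)
        def test_needs_dnspython(self):
            import dns.resolver  # noqa: F401

        @tests.skip_if(True)
        def test_never_runs(self):
            self.fail('skip_if(True) raises SkipTest before the body runs')
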
similarity index 99%
rename from eventlet/tests/api_test.py
rename to python-eventlet/tests/api_test.py
index f9636daa7d50d1b4218c93766976e90afa08fef4..1cdab889b5873de330ca360a09c16f3d72d6b6f0 100644 (file)
@@ -17,7 +17,7 @@ def check_hub():
     for nm in 'get_readers', 'get_writers':
         dct = getattr(hub, nm)()
         assert not dct, "hub.%s not empty: %s" % (nm, dct)
-    hub.abort(True)
+    hub.abort(wait=True)
     assert not hub.running
 
 
diff --git a/python-eventlet/tests/greendns_test.py b/python-eventlet/tests/greendns_test.py
new file mode 100644 (file)
index 0000000..d8112c9
--- /dev/null
@@ -0,0 +1,810 @@
+# coding: utf-8
+"""Tests for the eventlet.support.greendns module"""
+
+import os
+import socket
+import tempfile
+import time
+
+import tests
+from tests import mock
+try:
+    import dns.rdatatype
+    import dns.rdtypes.IN.A
+    import dns.rdtypes.IN.AAAA
+    import dns.resolver
+    import dns.reversename
+    import dns.rrset
+    from eventlet.support import greendns
+    greendns_available = True
+except ImportError:
+    greendns_available = False
+    greendns = mock.Mock()
+
+
+def greendns_requirement(_f):
+    """We want to skip tests if greendns is not installed.
+    """
+    return greendns_available
+
+
+class TestHostsResolver(tests.LimitedTestCase):
+
+    def _make_host_resolver(self):
+        """Returns a HostResolver instance
+
+        The hosts file will be empty but accessible as a py.path.local
+        instance using the ``hosts`` attribute.
+        """
+        hosts = tempfile.NamedTemporaryFile()
+        hr = greendns.HostsResolver(fname=hosts.name)
+        hr.hosts = hosts
+        hr._last_stat = 0
+        return hr
+
+    @tests.skip_unless(greendns_requirement)
+    def test_default_fname(self):
+        hr = greendns.HostsResolver()
+        assert os.path.exists(hr.fname)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_readlines_lines(self):
+        hr = self._make_host_resolver()
+        hr.hosts.write(b'line0\n')
+        hr.hosts.flush()
+        assert hr._readlines() == ['line0']
+        hr._last_stat = 0
+        hr.hosts.write(b'line1\n')
+        hr.hosts.flush()
+        assert hr._readlines() == ['line0', 'line1']
+        hr._last_stat = 0
+        hr.hosts.write(b'#comment0\nline0\n #comment1\nline1')
+        assert hr._readlines() == ['line0', 'line1']
+
+    @tests.skip_unless(greendns_requirement)
+    def test_readlines_missing_file(self):
+        hr = self._make_host_resolver()
+        hr.hosts.close()
+        hr._last_stat = 0
+        assert hr._readlines() == []
+
+    @tests.skip_unless(greendns_requirement)
+    def test_load_no_contents(self):
+        hr = self._make_host_resolver()
+        hr._load()
+        assert not hr._v4
+        assert not hr._v6
+        assert not hr._aliases
+
+    @tests.skip_unless(greendns_requirement)
+    def test_load_v4_v6_cname_aliases(self):
+        hr = self._make_host_resolver()
+        hr.hosts.write(b'1.2.3.4 v4.example.com v4\n'
+                       b'dead:beef::1 v6.example.com v6\n')
+        hr.hosts.flush()
+        hr._load()
+        assert hr._v4 == {'v4.example.com': '1.2.3.4', 'v4': '1.2.3.4'}
+        assert hr._v6 == {'v6.example.com': 'dead:beef::1',
+                          'v6': 'dead:beef::1'}
+        assert hr._aliases == {'v4': 'v4.example.com',
+                               'v6': 'v6.example.com'}
+
+    @tests.skip_unless(greendns_requirement)
+    def test_load_v6_link_local(self):
+        hr = self._make_host_resolver()
+        hr.hosts.write(b'fe80:: foo\n'
+                       b'fe80:dead:beef::1 bar\n')
+        hr.hosts.flush()
+        hr._load()
+        assert not hr._v4
+        assert not hr._v6
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_A(self):
+        hr = self._make_host_resolver()
+        hr._v4 = {'v4.example.com': '1.2.3.4'}
+        ans = hr.query('v4.example.com')
+        assert ans[0].address == '1.2.3.4'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_ans_types(self):
+        # This assumes test_query_A above succeeds
+        hr = self._make_host_resolver()
+        hr._v4 = {'v4.example.com': '1.2.3.4'}
+        hr._last_stat = time.time()
+        ans = hr.query('v4.example.com')
+        assert isinstance(ans, greendns.dns.resolver.Answer)
+        assert ans.response is None
+        assert ans.qname == dns.name.from_text('v4.example.com')
+        assert ans.rdtype == dns.rdatatype.A
+        assert ans.rdclass == dns.rdataclass.IN
+        assert ans.canonical_name == dns.name.from_text('v4.example.com')
+        assert ans.expiration
+        assert isinstance(ans.rrset, dns.rrset.RRset)
+        assert ans.rrset.rdtype == dns.rdatatype.A
+        assert ans.rrset.rdclass == dns.rdataclass.IN
+        ttl = greendns.HOSTS_TTL
+        assert ttl - 1 <= ans.rrset.ttl <= ttl + 1
+        rr = ans.rrset[0]
+        assert isinstance(rr, greendns.dns.rdtypes.IN.A.A)
+        assert rr.rdtype == dns.rdatatype.A
+        assert rr.rdclass == dns.rdataclass.IN
+        assert rr.address == '1.2.3.4'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_AAAA(self):
+        hr = self._make_host_resolver()
+        hr._v6 = {'v6.example.com': 'dead:beef::1'}
+        ans = hr.query('v6.example.com', dns.rdatatype.AAAA)
+        assert ans[0].address == 'dead:beef::1'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_unknown_raises(self):
+        hr = self._make_host_resolver()
+        with tests.assert_raises(greendns.dns.resolver.NoAnswer):
+            hr.query('example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_unknown_no_raise(self):
+        hr = self._make_host_resolver()
+        ans = hr.query('example.com', raise_on_no_answer=False)
+        assert isinstance(ans, greendns.dns.resolver.Answer)
+        assert ans.response is None
+        assert ans.qname == dns.name.from_text('example.com')
+        assert ans.rdtype == dns.rdatatype.A
+        assert ans.rdclass == dns.rdataclass.IN
+        assert ans.canonical_name == dns.name.from_text('example.com')
+        assert ans.expiration
+        assert isinstance(ans.rrset, greendns.dns.rrset.RRset)
+        assert ans.rrset.rdtype == dns.rdatatype.A
+        assert ans.rrset.rdclass == dns.rdataclass.IN
+        assert len(ans.rrset) == 0
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_CNAME(self):
+        hr = self._make_host_resolver()
+        hr._aliases = {'host': 'host.example.com'}
+        ans = hr.query('host', dns.rdatatype.CNAME)
+        assert ans[0].target == dns.name.from_text('host.example.com')
+        assert str(ans[0].target) == 'host.example.com.'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_query_unknown_type(self):
+        hr = self._make_host_resolver()
+        with tests.assert_raises(greendns.dns.resolver.NoAnswer):
+            hr.query('example.com', dns.rdatatype.MX)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases(self):
+        hr = self._make_host_resolver()
+        hr._aliases = {'host': 'host.example.com',
+                       'localhost': 'host.example.com'}
+        res = set(hr.getaliases('host'))
+        assert res == set(['host.example.com', 'localhost'])
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases_unknown(self):
+        hr = self._make_host_resolver()
+        assert hr.getaliases('host.example.com') == []
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases_fqdn(self):
+        hr = self._make_host_resolver()
+        hr._aliases = {'host': 'host.example.com'}
+        res = set(hr.getaliases('host.example.com'))
+        assert res == set(['host'])
+
+
+def _make_mock_base_resolver():
+    """A mocked base resolver class"""
+    class RR(object):
+        pass
+
+    class Resolver(object):
+        aliases = ['cname.example.com']
+        raises = None
+        rr = RR()
+
+        def query(self, *args, **kwargs):
+            self.args = args
+            self.kwargs = kwargs
+            if self.raises:
+                raise self.raises()
+            if hasattr(self, 'rrset'):
+                rrset = self.rrset
+            else:
+                rrset = [self.rr]
+            return greendns.HostsAnswer('foo', 1, 1, rrset, False)
+
+        def getaliases(self, *args, **kwargs):
+            return self.aliases
+
+    return Resolver
+
+
+class TestProxyResolver(tests.LimitedTestCase):
+
+    @tests.skip_unless(greendns_requirement)
+    def test_clear(self):
+        rp = greendns.ResolverProxy()
+        resolver = rp._resolver
+        rp.clear()
+        assert rp._resolver != resolver
+
+    @tests.skip_unless(greendns_requirement)
+    def _make_mock_hostsresolver(self):
+        """A mocked HostsResolver"""
+        base_resolver = _make_mock_base_resolver()
+        base_resolver.rr.address = '1.2.3.4'
+        return base_resolver()
+
+    @tests.skip_unless(greendns_requirement)
+    def _make_mock_resolver(self):
+        """A mocked Resolver"""
+        base_resolver = _make_mock_base_resolver()
+        base_resolver.rr.address = '5.6.7.8'
+        return base_resolver()
+
+    @tests.skip_unless(greendns_requirement)
+    def test_hosts(self):
+        hostsres = self._make_mock_hostsresolver()
+        rp = greendns.ResolverProxy(hostsres)
+        ans = rp.query('host.example.com')
+        assert ans[0].address == '1.2.3.4'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_hosts_noanswer(self):
+        hostsres = self._make_mock_hostsresolver()
+        res = self._make_mock_resolver()
+        rp = greendns.ResolverProxy(hostsres)
+        rp._resolver = res
+        hostsres.raises = greendns.dns.resolver.NoAnswer
+        ans = rp.query('host.example.com')
+        assert ans[0].address == '5.6.7.8'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_resolver(self):
+        res = self._make_mock_resolver()
+        rp = greendns.ResolverProxy()
+        rp._resolver = res
+        ans = rp.query('host.example.com')
+        assert ans[0].address == '5.6.7.8'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_noanswer(self):
+        res = self._make_mock_resolver()
+        rp = greendns.ResolverProxy()
+        rp._resolver = res
+        res.raises = greendns.dns.resolver.NoAnswer
+        with tests.assert_raises(greendns.dns.resolver.NoAnswer):
+            rp.query('host.example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_nxdomain(self):
+        res = self._make_mock_resolver()
+        rp = greendns.ResolverProxy()
+        rp._resolver = res
+        res.raises = greendns.dns.resolver.NXDOMAIN
+        with tests.assert_raises(greendns.dns.resolver.NXDOMAIN):
+            rp.query('host.example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_noanswer_hosts(self):
+        hostsres = self._make_mock_hostsresolver()
+        res = self._make_mock_resolver()
+        rp = greendns.ResolverProxy(hostsres)
+        rp._resolver = res
+        hostsres.raises = greendns.dns.resolver.NoAnswer
+        res.raises = greendns.dns.resolver.NoAnswer
+        with tests.assert_raises(greendns.dns.resolver.NoAnswer):
+            rp.query('host.example.com')
+
+    def _make_mock_resolver_aliases(self):
+
+        class RR(object):
+            target = 'host.example.com'
+
+        class Resolver(object):
+            call_count = 0
+            exc_type = greendns.dns.resolver.NoAnswer
+
+            def query(self, *args, **kwargs):
+                self.args = args
+                self.kwargs = kwargs
+                self.call_count += 1
+                if self.call_count < 2:
+                    return greendns.HostsAnswer(args[0], 1, 5, [RR()], False)
+                else:
+                    raise self.exc_type()
+
+        return Resolver()
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases(self):
+        aliases_res = self._make_mock_resolver_aliases()
+        rp = greendns.ResolverProxy()
+        rp._resolver = aliases_res
+        aliases = set(rp.getaliases('alias.example.com'))
+        assert aliases == set(['host.example.com'])
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases_fqdn(self):
+        aliases_res = self._make_mock_resolver_aliases()
+        rp = greendns.ResolverProxy()
+        rp._resolver = aliases_res
+        rp._resolver.call_count = 1
+        assert rp.getaliases('host.example.com') == []
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases_nxdomain(self):
+        aliases_res = self._make_mock_resolver_aliases()
+        rp = greendns.ResolverProxy()
+        rp._resolver = aliases_res
+        rp._resolver.call_count = 1
+        rp._resolver.exc_type = greendns.dns.resolver.NXDOMAIN
+        assert rp.getaliases('host.example.com') == []
+
+
+class TestResolve(tests.LimitedTestCase):
+
+    def setUp(self):
+        base_resolver = _make_mock_base_resolver()
+        base_resolver.rr.address = '1.2.3.4'
+        self._old_resolver = greendns.resolver
+        greendns.resolver = base_resolver()
+
+    def tearDown(self):
+        greendns.resolver = self._old_resolver
+
+    @tests.skip_unless(greendns_requirement)
+    def test_A(self):
+        ans = greendns.resolve('host.example.com', socket.AF_INET)
+        assert ans[0].address == '1.2.3.4'
+        assert greendns.resolver.args == ('host.example.com', dns.rdatatype.A)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_AAAA(self):
+        greendns.resolver.rr.address = 'dead:beef::1'
+        ans = greendns.resolve('host.example.com', socket.AF_INET6)
+        assert ans[0].address == 'dead:beef::1'
+        assert greendns.resolver.args == ('host.example.com', dns.rdatatype.AAAA)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_unknown_rdtype(self):
+        with tests.assert_raises(socket.gaierror):
+            greendns.resolve('host.example.com', socket.AF_INET6 + 1)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_timeout(self):
+        greendns.resolver.raises = greendns.dns.exception.Timeout
+        with tests.assert_raises(socket.gaierror):
+            greendns.resolve('host.example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_exc(self):
+        greendns.resolver.raises = greendns.dns.exception.DNSException
+        with tests.assert_raises(socket.gaierror):
+            greendns.resolve('host.example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_noraise_noanswer(self):
+        greendns.resolver.rrset = None
+        ans = greendns.resolve('example.com', raises=False)
+        assert not ans.rrset
+
+    @tests.skip_unless(greendns_requirement)
+    def test_noraise_nxdomain(self):
+        greendns.resolver.raises = greendns.dns.resolver.NXDOMAIN
+        ans = greendns.resolve('example.com', raises=False)
+        assert not ans.rrset
+
+
+class TestResolveCname(tests.LimitedTestCase):
+
+    def setUp(self):
+        base_resolver = _make_mock_base_resolver()
+        base_resolver.rr.target = 'cname.example.com'
+        self._old_resolver = greendns.resolver
+        greendns.resolver = base_resolver()
+
+    def tearDown(self):
+        greendns.resolver = self._old_resolver
+
+    @tests.skip_unless(greendns_requirement)
+    def test_success(self):
+        cname = greendns.resolve_cname('alias.example.com')
+        assert cname == 'cname.example.com'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_timeout(self):
+        greendns.resolver.raises = greendns.dns.exception.Timeout
+        with tests.assert_raises(socket.gaierror):
+            greendns.resolve_cname('alias.example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_nodata(self):
+        greendns.resolver.raises = greendns.dns.exception.DNSException
+        with tests.assert_raises(socket.gaierror):
+            greendns.resolve_cname('alias.example.com')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_no_answer(self):
+        greendns.resolver.raises = greendns.dns.resolver.NoAnswer
+        assert greendns.resolve_cname('host.example.com') == 'host.example.com'
+
+
+def _make_mock_resolve():
+    """A stubbed out resolve function
+
+    The returned object is meant to be assigned over greendns.resolve().
+    You must give it answers by calling .add().
+    """
+
+    class MockAnswer(list):
+        pass
+
+    class MockResolve(object):
+
+        def __init__(self):
+            self.answers = {}
+
+        def __call__(self, name, family=socket.AF_INET, raises=True):
+            qname = dns.name.from_text(name)
+            try:
+                rrset = self.answers[name][family]
+            except KeyError:
+                if raises:
+                    raise greendns.dns.resolver.NoAnswer()
+                rrset = dns.rrset.RRset(qname, 1, 1)
+            ans = MockAnswer()
+            ans.qname = qname
+            ans.rrset = rrset
+            ans.extend(rrset.items)
+            return ans
+
+        def add(self, name, addr):
+            """Add an address to a name and family"""
+            try:
+                rdata = dns.rdtypes.IN.A.A(dns.rdataclass.IN,
+                                           dns.rdatatype.A, addr)
+                family = socket.AF_INET
+            except (socket.error, dns.exception.SyntaxError):
+                rdata = dns.rdtypes.IN.AAAA.AAAA(dns.rdataclass.IN,
+                                                 dns.rdatatype.AAAA, addr)
+                family = socket.AF_INET6
+            family_dict = self.answers.setdefault(name, {})
+            rrset = family_dict.get(family)
+            if not rrset:
+                family_dict[family] = rrset = dns.rrset.RRset(
+                    dns.name.from_text(name), rdata.rdclass, rdata.rdtype)
+            rrset.add(rdata)
+
+    resolve = MockResolve()
+    return resolve
+
+
+class TestGetaddrinfo(tests.LimitedTestCase):
+
+    def _make_mock_resolve_cname(self):
+        """A stubbed out cname function"""
+
+        class ResolveCname(object):
+            qname = None
+            cname = 'cname.example.com'
+
+            def __call__(self, host):
+                self.qname = host
+                return self.cname
+
+        resolve_cname = ResolveCname()
+        return resolve_cname
+
+    def setUp(self):
+        self._old_resolve = greendns.resolve
+        self._old_resolve_cname = greendns.resolve_cname
+        self._old_orig_getaddrinfo = greendns.socket.getaddrinfo
+
+    def tearDown(self):
+        greendns.resolve = self._old_resolve
+        greendns.resolve_cname = self._old_resolve_cname
+        greendns.socket.getaddrinfo = self._old_orig_getaddrinfo
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaddrinfo(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '127.0.0.2')
+        greendns.resolve.add('example.com', '::1')
+        res = greendns.getaddrinfo('example.com', 'ssh')
+        addr = ('127.0.0.2', 22)
+        tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
+        udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
+        addr = ('::1', 22, 0, 0)
+        tcp6 = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
+        udp6 = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
+        filt_res = [ai[:3] + (ai[4],) for ai in res]
+        assert tcp in filt_res
+        assert udp in filt_res
+        assert tcp6 in filt_res
+        assert udp6 in filt_res
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaddrinfo_idn(self):
+        greendns.resolve = _make_mock_resolve()
+        idn_name = u'евентлет.com'
+        greendns.resolve.add(idn_name.encode('idna').decode('ascii'), '127.0.0.2')
+        res = greendns.getaddrinfo(idn_name, 'ssh')
+        addr = ('127.0.0.2', 22)
+        tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
+        udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
+        filt_res = [ai[:3] + (ai[4],) for ai in res]
+        assert tcp in filt_res
+        assert udp in filt_res
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaddrinfo_inet(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '127.0.0.2')
+        res = greendns.getaddrinfo('example.com', 'ssh', socket.AF_INET)
+        addr = ('127.0.0.2', 22)
+        tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
+        udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
+        assert tcp in [ai[:3] + (ai[4],) for ai in res]
+        assert udp in [ai[:3] + (ai[4],) for ai in res]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaddrinfo_inet6(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '::1')
+        res = greendns.getaddrinfo('example.com', 'ssh', socket.AF_INET6)
+        addr = ('::1', 22, 0, 0)
+        tcp = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
+        udp = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
+        assert tcp in [ai[:3] + (ai[4],) for ai in res]
+        assert udp in [ai[:3] + (ai[4],) for ai in res]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaddrinfo_only_a_ans(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '1.2.3.4')
+        res = greendns.getaddrinfo('example.com', 0)
+        addr = [('1.2.3.4', 0)] * len(res)
+        assert addr == [ai[-1] for ai in res]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaddrinfo_only_aaaa_ans(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', 'dead:beef::1')
+        res = greendns.getaddrinfo('example.com', 0)
+        addr = [('dead:beef::1', 0, 0, 0)] * len(res)
+        assert addr == [ai[-1] for ai in res]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_canonname(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('host.example.com', '1.2.3.4')
+        greendns.resolve_cname = self._make_mock_resolve_cname()
+        res = greendns.getaddrinfo('host.example.com', 0,
+                                   0, 0, 0, socket.AI_CANONNAME)
+        assert res[0][3] == 'cname.example.com'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_host_none(self):
+        res = greendns.getaddrinfo(None, 80)
+        for addr in set(ai[-1] for ai in res):
+            assert addr in [('127.0.0.1', 80), ('::1', 80, 0, 0)]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_host_none_passive(self):
+        res = greendns.getaddrinfo(None, 80, 0, 0, 0, socket.AI_PASSIVE)
+        for addr in set(ai[-1] for ai in res):
+            assert addr in [('0.0.0.0', 80), ('::', 80, 0, 0)]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_v4mapped(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '1.2.3.4')
+        res = greendns.getaddrinfo('example.com', 80,
+                                   socket.AF_INET6, 0, 0, socket.AI_V4MAPPED)
+        addrs = set(ai[-1] for ai in res)
+        assert addrs == set([('::ffff:1.2.3.4', 80, 0, 0)])
+
+    @tests.skip_unless(greendns_requirement)
+    def test_v4mapped_all(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '1.2.3.4')
+        greendns.resolve.add('example.com', 'dead:beef::1')
+        res = greendns.getaddrinfo('example.com', 80, socket.AF_INET6, 0, 0,
+                                   socket.AI_V4MAPPED | socket.AI_ALL)
+        addrs = set(ai[-1] for ai in res)
+        for addr in addrs:
+            assert addr in [('::ffff:1.2.3.4', 80, 0, 0),
+                            ('dead:beef::1', 80, 0, 0)]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_numericserv(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '1.2.3.4')
+        with tests.assert_raises(socket.gaierror):
+            greendns.getaddrinfo('example.com', 'www', 0, 0, 0, socket.AI_NUMERICSERV)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_numerichost(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '1.2.3.4')
+        with tests.assert_raises(socket.gaierror):
+            greendns.getaddrinfo('example.com', 80, 0, 0, 0, socket.AI_NUMERICHOST)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_noport(self):
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('example.com', '1.2.3.4')
+        ai = greendns.getaddrinfo('example.com', None)
+        assert ai[0][-1][1] == 0
+
+    @tests.skip_unless(greendns_requirement)
+    def test_AI_ADDRCONFIG(self):
+        # When the user sets AI_ADDRCONFIG but only has an IPv4
+        # address configured we will iterate over the results, but the
+        # call for the IPv6 address will fail rather than return an
+        # empty list.  In that case we should catch the exception and
+        # only return the ones which worked.
+        def getaddrinfo(addr, port, family, socktype, proto, aiflags):
+            if addr == '127.0.0.1':
+                return [(socket.AF_INET, 1, 0, '', ('127.0.0.1', 0))]
+            elif addr == '::1' and aiflags & socket.AI_ADDRCONFIG:
+                raise socket.error(socket.EAI_ADDRFAMILY,
+                                   'Address family for hostname not supported')
+            elif addr == '::1' and not aiflags & socket.AI_ADDRCONFIG:
+                return [(socket.AF_INET6, 1, 0, '', ('::1', 0, 0, 0))]
+        greendns.socket.getaddrinfo = getaddrinfo
+        greendns.resolve = _make_mock_resolve()
+        greendns.resolve.add('localhost', '127.0.0.1')
+        greendns.resolve.add('localhost', '::1')
+        res = greendns.getaddrinfo('localhost', None,
+                                   0, 0, 0, socket.AI_ADDRCONFIG)
+        assert res == [(socket.AF_INET, 1, 0, '', ('127.0.0.1', 0))]
+
+    @tests.skip_unless(greendns_requirement)
+    def test_AI_ADDRCONFIG_noaddr(self):
+        # If AI_ADDRCONFIG is used but there is no address we need to
+        # get an exception, not an empty list.
+        def getaddrinfo(addr, port, family, socktype, proto, aiflags):
+            raise socket.error(socket.EAI_ADDRFAMILY,
+                               'Address family for hostname not supported')
+        greendns.socket.getaddrinfo = getaddrinfo
+        greendns.resolve = _make_mock_resolve()
+        try:
+            greendns.getaddrinfo('::1', None, 0, 0, 0, socket.AI_ADDRCONFIG)
+        except socket.error as e:
+            assert e.errno == socket.EAI_ADDRFAMILY
+
+
+class TestIsIpAddr(tests.LimitedTestCase):
+
+    @tests.skip_unless(greendns_requirement)
+    def test_isv4(self):
+        assert greendns.is_ipv4_addr('1.2.3.4')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_isv4_false(self):
+        assert not greendns.is_ipv4_addr('260.0.0.0')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_isv6(self):
+        assert greendns.is_ipv6_addr('dead:beef::1')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_isv6_invalid(self):
+        assert not greendns.is_ipv6_addr('foobar::1')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_v4(self):
+        assert greendns.is_ip_addr('1.2.3.4')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_v4_illegal(self):
+        assert not greendns.is_ip_addr('300.0.0.1')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_v6_addr(self):
+        assert greendns.is_ip_addr('::1')
+
+    @tests.skip_unless(greendns_requirement)
+    def test_isv4_none(self):
+        assert not greendns.is_ipv4_addr(None)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_isv6_none(self):
+        assert not greendns.is_ipv6_addr(None)
+
+    @tests.skip_unless(greendns_requirement)
+    def test_none(self):
+        assert not greendns.is_ip_addr(None)
+
+
+class TestGethostbyname(tests.LimitedTestCase):
+
+    def setUp(self):
+        self._old_resolve = greendns.resolve
+        greendns.resolve = _make_mock_resolve()
+
+    def tearDown(self):
+        greendns.resolve = self._old_resolve
+
+    @tests.skip_unless(greendns_requirement)
+    def test_ipaddr(self):
+        assert greendns.gethostbyname('1.2.3.4') == '1.2.3.4'
+
+    @tests.skip_unless(greendns_requirement)
+    def test_name(self):
+        greendns.resolve.add('host.example.com', '1.2.3.4')
+        assert greendns.gethostbyname('host.example.com') == '1.2.3.4'
+
+
+class TestGetaliases(tests.LimitedTestCase):
+
+    def _make_mock_resolver(self):
+        base_resolver = _make_mock_base_resolver()
+        resolver = base_resolver()
+        resolver.aliases = ['cname.example.com']
+        return resolver
+
+    def setUp(self):
+        self._old_resolver = greendns.resolver
+        greendns.resolver = self._make_mock_resolver()
+
+    def tearDown(self):
+        greendns.resolver = self._old_resolver
+
+    @tests.skip_unless(greendns_requirement)
+    def test_getaliases(self):
+        assert greendns.getaliases('host.example.com') == ['cname.example.com']
+
+
+class TestGethostbyname_ex(tests.LimitedTestCase):
+
+    def _make_mock_getaliases(self):
+
+        class GetAliases(object):
+            aliases = ['cname.example.com']
+
+            def __call__(self, *args, **kwargs):
+                return self.aliases
+
+        getaliases = GetAliases()
+        return getaliases
+
+    def setUp(self):
+        self._old_resolve = greendns.resolve
+        greendns.resolve = _make_mock_resolve()
+        self._old_getaliases = greendns.getaliases
+
+    def tearDown(self):
+        greendns.resolve = self._old_resolve
+        greendns.getaliases = self._old_getaliases
+
+    @tests.skip_unless(greendns_requirement)
+    def test_ipaddr(self):
+        res = greendns.gethostbyname_ex('1.2.3.4')
+        assert res == ('1.2.3.4', [], ['1.2.3.4'])
+
+    @tests.skip_unless(greendns_requirement)
+    def test_name(self):
+        greendns.resolve.add('host.example.com', '1.2.3.4')
+        greendns.getaliases = self._make_mock_getaliases()
+        greendns.getaliases.aliases = []
+        res = greendns.gethostbyname_ex('host.example.com')
+        assert res == ('host.example.com', [], ['1.2.3.4'])
+
+    @tests.skip_unless(greendns_requirement)
+    def test_multiple_addrs(self):
+        greendns.resolve.add('host.example.com', '1.2.3.4')
+        greendns.resolve.add('host.example.com', '1.2.3.5')
+        greendns.getaliases = self._make_mock_getaliases()
+        greendns.getaliases.aliases = []
+        res = greendns.gethostbyname_ex('host.example.com')
+        assert res == ('host.example.com', [], ['1.2.3.4', '1.2.3.5'])
+
+
+def test_reverse_name():
+    tests.run_isolated('greendns_from_address_203.py')
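The isolated checks above and below go through a tests.run_isolated() helper that is not part of this diff. A hedged sketch of what such a helper plausibly does, assuming it simply executes the script in a fresh interpreter and expects a final 'pass' line (the 'skip:' convention is inferred from the scripts in tests/isolated/, not from the real helper):

    import os
    import subprocess
    import sys

    def run_isolated(filename, prefix='tests/isolated/'):
        # Run the script in a separate interpreter so monkey-patching in
        # one test cannot leak into another.
        path = os.path.join(prefix, filename)
        output = subprocess.check_output([sys.executable, path]).decode('utf-8')
        lines = output.splitlines()
        last = lines[-1] if lines else ''
        if last.startswith('skip:'):
            return  # the script opted out (e.g. dnspython not installed)
        assert last == 'pass', output
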
similarity index 94%
rename from eventlet/tests/greenio_test.py
rename to python-eventlet/tests/greenio_test.py
index 4b375bae13dc52c59a53685545d3b8b25e1530e1..8a94b7bddd80d1b1d45460ddac34e0f15192260d 100644 (file)
@@ -15,10 +15,7 @@ from eventlet import event, greenio, debug
 from eventlet.hubs import get_hub
 from eventlet.green import select, socket, time, ssl
 from eventlet.support import capture_stderr, get_errno, six
 from eventlet.hubs import get_hub
 from eventlet.green import select, socket, time, ssl
 from eventlet.support import capture_stderr, get_errno, six
-from tests import (
-    LimitedTestCase, main,
-    skip_with_pyevent, skipped, skip_if, skip_on_windows,
-)
+import tests
 
 
 if six.PY3:
 
 
 if six.PY3:
@@ -58,7 +55,7 @@ def using_kqueue_hub(_f):
         return False
 
 
         return False
 
 
-class TestGreenSocket(LimitedTestCase):
+class TestGreenSocket(tests.LimitedTestCase):
     def assertWriteToClosedFileRaises(self, fd):
         if sys.version_info[0] < 3:
             # 2.x socket._fileobjects are odd: writes don't check
     def assertWriteToClosedFileRaises(self, fd):
         if sys.version_info[0] < 3:
             # 2.x socket._fileobjects are odd: writes don't check
@@ -481,7 +478,7 @@ class TestGreenSocket(LimitedTestCase):
         server.close()
         client.close()
 
         server.close()
         client.close()
 
-    @skip_with_pyevent
+    @tests.skip_with_pyevent
     def test_raised_multiple_readers(self):
         debug.hub_prevent_multiple_readers(True)
 
     def test_raised_multiple_readers(self):
         debug.hub_prevent_multiple_readers(True)
 
@@ -503,9 +500,9 @@ class TestGreenSocket(LimitedTestCase):
         s.sendall(b'b')
         a.wait()
 
         s.sendall(b'b')
         a.wait()
 
-    @skip_with_pyevent
-    @skip_if(using_epoll_hub)
-    @skip_if(using_kqueue_hub)
+    @tests.skip_with_pyevent
+    @tests.skip_if(using_epoll_hub)
+    @tests.skip_if(using_kqueue_hub)
     def test_closure(self):
         def spam_to_me(address):
             sock = eventlet.connect(address)
     def test_closure(self):
         def spam_to_me(address):
             sock = eventlet.connect(address)
@@ -612,6 +609,12 @@ class TestGreenSocket(LimitedTestCase):
         assert select.select([], [s1], [], 0) == ([], [s1], [])
         assert select.select([], [s1], [], 0) == ([], [s1], [])
 
         assert select.select([], [s1], [], 0) == ([], [s1], [])
         assert select.select([], [s1], [], 0) == ([], [s1], [])
 
+    def test_shutdown_safe(self):
+        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        sock.close()
+        # should not raise
+        greenio.shutdown_safe(sock)
+
 
 def test_get_fileno_of_a_socket_works():
     class DummySocket(object):
 
 def test_get_fileno_of_a_socket_works():
     class DummySocket(object):
@@ -624,11 +627,15 @@ def test_get_fileno_of_an_int_works():
     assert select.get_fileno(123) == 123
 
 
     assert select.get_fileno(123) == 123
 
 
+expected_get_fileno_type_error_message = (
+    'Expected int or long, got <%s \'str\'>' % ('type' if six.PY2 else 'class'))
+
+
 def test_get_fileno_of_wrong_type_fails():
     try:
         select.get_fileno('foo')
     except TypeError as ex:
 def test_get_fileno_of_wrong_type_fails():
     try:
         select.get_fileno('foo')
     except TypeError as ex:
-        assert str(ex) == 'Expected int or long, got <type \'str\'>'
+        assert str(ex) == expected_get_fileno_type_error_message
     else:
         assert False, 'Expected TypeError not raised'
 
     else:
         assert False, 'Expected TypeError not raised'
 
@@ -640,13 +647,13 @@ def test_get_fileno_of_a_socket_with_fileno_returning_wrong_type_fails():
     try:
         select.get_fileno(DummySocket())
     except TypeError as ex:
     try:
         select.get_fileno(DummySocket())
     except TypeError as ex:
-        assert str(ex) == 'Expected int or long, got <type \'str\'>'
+        assert str(ex) == expected_get_fileno_type_error_message
     else:
         assert False, 'Expected TypeError not raised'
 
 
     else:
         assert False, 'Expected TypeError not raised'
 
 
-class TestGreenPipe(LimitedTestCase):
-    @skip_on_windows
+class TestGreenPipe(tests.LimitedTestCase):
+    @tests.skip_on_windows
     def setUp(self):
         super(self.__class__, self).setUp()
         self.tempdir = tempfile.mkdtemp('_green_pipe_test')
     def setUp(self):
         super(self.__class__, self).setUp()
         self.tempdir = tempfile.mkdtemp('_green_pipe_test')
@@ -657,8 +664,8 @@ class TestGreenPipe(LimitedTestCase):
 
     def test_pipe(self):
         r, w = os.pipe()
 
     def test_pipe(self):
         r, w = os.pipe()
-        rf = greenio.GreenPipe(r, 'r')
-        wf = greenio.GreenPipe(w, 'w', 0)
+        rf = greenio.GreenPipe(r, 'rb')
+        wf = greenio.GreenPipe(w, 'wb', 0)
 
         def sender(f, content):
             for ch in map(six.int2byte, six.iterbytes(content)):
 
         def sender(f, content):
             for ch in map(six.int2byte, six.iterbytes(content)):
@@ -680,8 +687,8 @@ class TestGreenPipe(LimitedTestCase):
         # also ensures that readline() terminates on '\n' and '\r\n'
         r, w = os.pipe()
 
         # also ensures that readline() terminates on '\n' and '\r\n'
         r, w = os.pipe()
 
-        r = greenio.GreenPipe(r)
-        w = greenio.GreenPipe(w, 'w')
+        r = greenio.GreenPipe(r, 'rb')
+        w = greenio.GreenPipe(w, 'wb')
 
         def writer():
             eventlet.sleep(.1)
 
         def writer():
             eventlet.sleep(.1)
@@ -707,8 +714,8 @@ class TestGreenPipe(LimitedTestCase):
     def test_pipe_writes_large_messages(self):
         r, w = os.pipe()
 
     def test_pipe_writes_large_messages(self):
         r, w = os.pipe()
 
-        r = greenio.GreenPipe(r)
-        w = greenio.GreenPipe(w, 'w')
+        r = greenio.GreenPipe(r, 'rb')
+        w = greenio.GreenPipe(w, 'wb')
 
         large_message = b"".join([1024 * six.int2byte(i) for i in range(65)])
 
 
         large_message = b"".join([1024 * six.int2byte(i) for i in range(65)])
 
@@ -720,7 +727,7 @@ class TestGreenPipe(LimitedTestCase):
 
         for i in range(65):
             buf = r.read(1024)
 
         for i in range(65):
             buf = r.read(1024)
-            expected = 1024 * chr(i)
+            expected = 1024 * six.int2byte(i)
             self.assertEqual(
                 buf, expected,
                 "expected=%r..%r, found=%r..%r iter=%d"
             self.assertEqual(
                 buf, expected,
                 "expected=%r..%r, found=%r..%r iter=%d"
@@ -728,7 +735,7 @@ class TestGreenPipe(LimitedTestCase):
         gt.wait()
 
     def test_seek_on_buffered_pipe(self):
         gt.wait()
 
     def test_seek_on_buffered_pipe(self):
-        f = greenio.GreenPipe(self.tempdir + "/TestFile", 'w+', 1024)
+        f = greenio.GreenPipe(self.tempdir + "/TestFile", 'wb+', 1024)
         self.assertEqual(f.tell(), 0)
         f.seek(0, 2)
         self.assertEqual(f.tell(), 0)
         self.assertEqual(f.tell(), 0)
         f.seek(0, 2)
         self.assertEqual(f.tell(), 0)
@@ -737,31 +744,31 @@ class TestGreenPipe(LimitedTestCase):
         self.assertEqual(f.tell(), 10)
         f.seek(0)
         value = f.read(1)
         self.assertEqual(f.tell(), 10)
         f.seek(0)
         value = f.read(1)
-        self.assertEqual(value, '1')
+        self.assertEqual(value, b'1')
         self.assertEqual(f.tell(), 1)
         value = f.read(1)
         self.assertEqual(f.tell(), 1)
         value = f.read(1)
-        self.assertEqual(value, '2')
+        self.assertEqual(value, b'2')
         self.assertEqual(f.tell(), 2)
         f.seek(0, 1)
         self.assertEqual(f.tell(), 2)
         f.seek(0, 1)
-        self.assertEqual(f.readline(), '34567890')
+        self.assertEqual(f.readline(), b'34567890')
         f.seek(-5, 1)
         f.seek(-5, 1)
-        self.assertEqual(f.readline(), '67890')
+        self.assertEqual(f.readline(), b'67890')
         f.seek(0)
         f.seek(0)
-        self.assertEqual(f.readline(), '1234567890')
+        self.assertEqual(f.readline(), b'1234567890')
         f.seek(0, 2)
         f.seek(0, 2)
-        self.assertEqual(f.readline(), '')
+        self.assertEqual(f.readline(), b'')
 
     def test_truncate(self):
 
     def test_truncate(self):
-        f = greenio.GreenPipe(self.tempdir + "/TestFile", 'w+', 1024)
+        f = greenio.GreenPipe(self.tempdir + "/TestFile", 'wb+', 1024)
         f.write(b'1234567890')
         f.truncate(9)
         self.assertEqual(f.tell(), 9)
 
 
         f.write(b'1234567890')
         f.truncate(9)
         self.assertEqual(f.tell(), 9)
 
 
-class TestGreenIoLong(LimitedTestCase):
+class TestGreenIoLong(tests.LimitedTestCase):
     TEST_TIMEOUT = 10  # the test here might take a while depending on the OS
 
     TEST_TIMEOUT = 10  # the test here might take a while depending on the OS
 
-    @skip_with_pyevent
+    @tests.skip_with_pyevent
     def test_multiple_readers(self, clibufsize=False):
         debug.hub_prevent_multiple_readers(False)
         recvsize = 2 * min_buf_size()
     def test_multiple_readers(self, clibufsize=False):
         debug.hub_prevent_multiple_readers(False)
         recvsize = 2 * min_buf_size()
@@ -814,21 +821,21 @@ class TestGreenIoLong(LimitedTestCase):
         assert len(results2) > 0
         debug.hub_prevent_multiple_readers()
 
         assert len(results2) > 0
         debug.hub_prevent_multiple_readers()
 
-    @skipped  # by rdw because it fails but it's not clear how to make it pass
-    @skip_with_pyevent
+    @tests.skipped  # by rdw because it fails but it's not clear how to make it pass
+    @tests.skip_with_pyevent
     def test_multiple_readers2(self):
         self.test_multiple_readers(clibufsize=True)
 
 
     def test_multiple_readers2(self):
         self.test_multiple_readers(clibufsize=True)
 
 
-class TestGreenIoStarvation(LimitedTestCase):
+class TestGreenIoStarvation(tests.LimitedTestCase):
     # fixme: this doesn't succeed, because of eventlet's predetermined
     # ordering.  two processes, one with server, one with client eventlets
     # might be more reliable?
 
     TEST_TIMEOUT = 300  # the test here might take a while depending on the OS
 
     # fixme: this doesn't succeed, because of eventlet's predetermined
     # ordering.  two processes, one with server, one with client eventlets
     # might be more reliable?
 
     TEST_TIMEOUT = 300  # the test here might take a while depending on the OS
 
-    @skipped  # by rdw, because it fails but it's not clear how to make it pass
-    @skip_with_pyevent
+    @tests.skipped  # by rdw, because it fails but it's not clear how to make it pass
+    @tests.skip_with_pyevent
     def test_server_starvation(self, sendloops=15):
         recvsize = 2 * min_buf_size()
         sendsize = 10000 * recvsize
     def test_server_starvation(self, sendloops=15):
         recvsize = 2 * min_buf_size()
         sendsize = 10000 * recvsize
@@ -945,5 +952,5 @@ def test_socket_del_fails_gracefully_when_not_fully_initialized():
     assert err.getvalue() == ''
 
 
     assert err.getvalue() == ''
 
 
-if __name__ == '__main__':
-    main()
+def test_double_close_219():
+    tests.run_isolated('greenio_double_close_219.py')
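The GreenPipe changes above ('w' -> 'wb', byte-string expectations) track Python 3's separation of text and binary I/O: an unbuffered file object over a pipe is only allowed in binary mode. A small standard-library illustration of the same constraint (not eventlet code):

    import os

    r, w = os.pipe()
    wf = os.fdopen(w, 'wb', 0)   # buffering=0 is rejected for text mode on Python 3
    rf = os.fdopen(r, 'rb')
    wf.write(b'hello\n')
    wf.close()
    print(rf.readline())         # b'hello\n'
    rf.close()
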
similarity index 99%
rename from eventlet/tests/greenpool_test.py
rename to python-eventlet/tests/greenpool_test.py
index 069ed5a14b956b41065872cf634740d2d1a294fe..ff56262ee2a76dd99b6baea9043c812084c671fc 100644 (file)
@@ -202,7 +202,7 @@ class GreenPool(tests.LimitedTestCase):
             timer = eventlet.Timeout(0, RuntimeError())
             try:
                 tp.get()
             timer = eventlet.Timeout(0, RuntimeError())
             try:
                 tp.get()
-                self.fail("Shouldn't have recieved anything from the pool")
+                self.fail("Shouldn't have received anything from the pool")
             except RuntimeError:
                 return 'timed out'
             else:
             except RuntimeError:
                 return 'timed out'
             else:
similarity index 98%
rename from eventlet/tests/hub_test.py
rename to python-eventlet/tests/hub_test.py
index f1012a998109bb783f563c4c34acb4bd23b68a92..07e502aff18f5c00f3e1d52421931620ea4ad123 100644 (file)
@@ -298,7 +298,7 @@ class TestFork(LimitedTestCase):
     def test_fork(self):
         output = tests.run_python('tests/hub_test_fork.py')
         lines = output.splitlines()
     def test_fork(self):
         output = tests.run_python('tests/hub_test_fork.py')
         lines = output.splitlines()
-        self.assertEqual(lines, ["accept blocked", "child died ok"], output)
+        self.assertEqual(lines, [b"accept blocked", b"child died ok"], output)
 
 
 class TestDeadRunLoop(LimitedTestCase):
 
 
 class TestDeadRunLoop(LimitedTestCase):
@@ -330,7 +330,7 @@ class TestDeadRunLoop(LimitedTestCase):
         with eventlet.Timeout(0.5, self.CustomException()):
             # we now switch to the hub, there should be no existing timers
             # that switch back to this greenlet and so this hub.switch()
         with eventlet.Timeout(0.5, self.CustomException()):
             # we now switch to the hub, there should be no existing timers
             # that switch back to this greenlet and so this hub.switch()
-            # call should block indefinately.
+            # call should block indefinitely.
             self.assertRaises(self.CustomException, hub.switch)
 
     def test_parent(self):
             self.assertRaises(self.CustomException, hub.switch)
 
     def test_parent(self):
@@ -352,7 +352,7 @@ class TestDeadRunLoop(LimitedTestCase):
             # we now switch to the hub which will allow
             # completion of dummyproc.
             # this should return execution back to the runloop and not
             # we now switch to the hub which will allow
             # completion of dummyproc.
             # this should return execution back to the runloop and not
-            # this greenlet so that hub.switch() would block indefinately.
+            # this greenlet so that hub.switch() would block indefinitely.
             self.assertRaises(self.CustomException, hub.switch)
         assert g.dead  # sanity check that dummyproc has completed
 
             self.assertRaises(self.CustomException, hub.switch)
         assert g.dead  # sanity check that dummyproc has completed
 
@@ -376,8 +376,9 @@ try:
 except AttributeError:
     pass
 
 except AttributeError:
     pass
 
-import __builtin__
-original_import = __builtin__.__import__
+from eventlet.support.six.moves import builtins
+
+original_import = builtins.__import__
 
 def fail_import(name, *args, **kwargs):
     if 'epoll' in name:
 
 def fail_import(name, *args, **kwargs):
     if 'epoll' in name:
@@ -386,7 +387,7 @@ def fail_import(name, *args, **kwargs):
         print('kqueue tried')
     return original_import(name, *args, **kwargs)
 
         print('kqueue tried')
     return original_import(name, *args, **kwargs)
 
-__builtin__.__import__ = fail_import
+builtins.__import__ = fail_import
 
 
 import eventlet.hubs
 
 
 import eventlet.hubs
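The switch from __builtin__ to eventlet.support.six.moves.builtins keeps the import hook working on both Python 2 and 3, since six.moves.builtins aliases whichever builtins module the interpreter provides. A minimal sketch of the same hook pattern, for illustration only:

    from eventlet.support.six.moves import builtins

    real_import = builtins.__import__

    def logging_import(name, *args, **kwargs):
        # A test hook could instead raise ImportError for selected names,
        # as fail_import() above does for the epoll/kqueue hubs.
        print('importing %s' % name)
        return real_import(name, *args, **kwargs)

    builtins.__import__ = logging_import
    try:
        import json  # noqa -- triggers the hook
    finally:
        builtins.__import__ = real_import
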
diff --git a/python-eventlet/tests/isolated/greendns_from_address_203.py b/python-eventlet/tests/isolated/greendns_from_address_203.py
new file mode 100644 (file)
index 0000000..5c8bb14
--- /dev/null
@@ -0,0 +1,16 @@
+__test__ = False
+
+
+def main():
+    import eventlet
+    try:
+        from dns import reversename
+    except ImportError:
+        print('skip:require dns (package dnspython)')
+        return
+    eventlet.monkey_patch(all=True)
+    reversename.from_address('127.0.0.1')
+    print('pass')
+
+if __name__ == '__main__':
+    main()
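For reference, dns.reversename.from_address() is a pure name transformation with no network I/O, so the script above mainly checks that the call still works once eventlet has monkey-patched everything. Expected values, assuming the dnspython package is installed:

    from dns import reversename

    print(reversename.from_address('127.0.0.1'))  # 1.0.0.127.in-addr.arpa.
    print(reversename.from_address('::1'))        # 32 nibble labels ending in ip6.arpa.
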
diff --git a/python-eventlet/tests/isolated/greenio_double_close_219.py b/python-eventlet/tests/isolated/greenio_double_close_219.py
new file mode 100644 (file)
index 0000000..9881ce8
--- /dev/null
@@ -0,0 +1,22 @@
+__test__ = False
+
+
+def main():
+    import eventlet
+    eventlet.monkey_patch()
+    import subprocess
+    import gc
+
+    p = subprocess.Popen(['ls'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    # the following line creates a _SocketDuckForFd(3) and .close()s it, but the
+    # object has not been collected by the GC yet
+    p.communicate()
+
+    f = open('/dev/null', 'rb')    # f.fileno() == 3
+    gc.collect() # this calls the __del__ of _SocketDuckForFd(3), close()ing it again
+
+    f.close() # OSError, because the fd 3 has already been closed
+    print('pass')
+
+if __name__ == '__main__':
+    main()
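The comments in this script pin down the failure: a finalizer closes a file descriptor number that has already been closed and reused by an unrelated file. A generic illustration of the idempotent-close pattern that avoids this class of bug (hypothetical class, not eventlet's _SocketDuckForFd):

    import os

    class FdHolder(object):
        """Owns an OS file descriptor and closes it exactly once."""

        def __init__(self, fd):
            self._fd = fd

        def close(self):
            fd, self._fd = self._fd, None
            if fd is not None:
                os.close(fd)  # only the first close() reaches the OS

        def __del__(self):
            self.close()      # safe even after an explicit close()
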
diff --git a/python-eventlet/tests/isolated/mysqldb_monkey_patch.py b/python-eventlet/tests/isolated/mysqldb_monkey_patch.py
new file mode 100644 (file)
index 0000000..8522d06
--- /dev/null
@@ -0,0 +1,14 @@
+from __future__ import print_function
+
+# no standard tests in this file, ignore
+__test__ = False
+
+if __name__ == '__main__':
+    import MySQLdb as m
+    from eventlet import patcher
+    from eventlet.green import MySQLdb as gm
+    patcher.monkey_patch(all=True, MySQLdb=True)
+    patched_set = set(patcher.already_patched) - set(['psycopg'])
+    assert patched_set == frozenset(['MySQLdb', 'os', 'select', 'socket', 'thread', 'time'])
+    assert m.connect == gm.connect
+    print('pass')
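The assertion above reads eventlet.patcher.already_patched, which records the module groups that monkey_patch() has processed. A short sketch of inspecting that state (exact contents depend on the eventlet version and the flags passed):

    import eventlet
    from eventlet import patcher

    eventlet.monkey_patch(socket=True, time=True)
    print(sorted(patcher.already_patched))      # e.g. includes 'socket' and 'time'
    print(patcher.is_monkey_patched('socket'))  # True after the call above
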
similarity index 96%
rename from eventlet/tests/patcher_test_importlib_lock.py
rename to python-eventlet/tests/isolated/patcher_importlib_lock.py
index 8f7cea70627b4753561d2d4d2bdd9a93350f950d..b6e5a11c25b10e4aaf576fbef35ca79d89fa4110 100644 (file)
@@ -27,4 +27,4 @@ if __name__ == '__main__':
     do_import()
 
     thread.join()
     do_import()
 
     thread.join()
-    print('ok')
+    print('pass')
diff --git a/python-eventlet/tests/isolated/patcher_threading_condition.py b/python-eventlet/tests/isolated/patcher_threading_condition.py
new file mode 100644 (file)
index 0000000..5bce0f2
--- /dev/null
@@ -0,0 +1,23 @@
+# Issue #185: test threading.Condition with monkey-patching
+import eventlet
+
+# no standard tests in this file, ignore
+__test__ = False
+
+
+if __name__ == '__main__':
+    eventlet.monkey_patch()
+
+    import threading
+
+    def func(c):
+        with c:
+            c.notify()
+
+    c = threading.Condition()
+    with c:
+        t = threading.Thread(target=func, args=(c,))
+        t.start()
+        c.wait()
+
+    print('pass')
diff --git a/python-eventlet/tests/isolated/patcher_threading_join.py b/python-eventlet/tests/isolated/patcher_threading_join.py
new file mode 100644 (file)
index 0000000..4361f52
--- /dev/null
@@ -0,0 +1,23 @@
+# Issue #223: test threading.Thread.join with monkey-patching
+import eventlet
+
+# no standard tests in this file, ignore
+__test__ = False
+
+
+if __name__ == '__main__':
+    eventlet.monkey_patch()
+
+    import threading
+    import time
+
+    sleeper = threading.Thread(target=time.sleep, args=(1,))
+    start = time.time()
+    sleeper.start()
+    sleeper.join()
+    dt = time.time() - start
+
+    if dt < 1.0:
+        raise Exception("test failed: dt=%s" % dt)
+
+    print('pass')
similarity index 53%
rename from eventlet/tests/wsgi_test_conntimeout.py
rename to python-eventlet/tests/isolated/wsgi_connection_timeout.py
index d925a042b82544ba1624e72f1c97860bd71ce15c..6a8c6233d8992408d800bf1e3c7fdd5a671269b0 100644 (file)
@@ -22,13 +22,10 @@ server / client accept() conn - ExplodingConnectionWrap
     V  V  V
 connection makefile() file objects - ExplodingSocketFile <-- these raise
 """
     V  V  V
 connection makefile() file objects - ExplodingSocketFile <-- these raise
 """
-from __future__ import print_function
-
-import eventlet
-
 import socket
 import socket
-import sys
 
 
+import eventlet
+from eventlet.support import six
 import tests.wsgi_test
 
 
 import tests.wsgi_test
 
 
@@ -36,6 +33,17 @@ import tests.wsgi_test
 __test__ = False
 
 
 __test__ = False
 
 
+TAG_BOOM = "=== ~* BOOM *~ ==="
+
+output_buffer = []
+
+
+class BufferLog(object):
+    @staticmethod
+    def write(s):
+        output_buffer.append(s.rstrip())
+
+
 # This test might make you wince
 class NaughtySocketAcceptWrap(object):
     # server's socket.accept(); patches resulting connection sockets
 # This test might make you wince
 class NaughtySocketAcceptWrap(object):
     # server's socket.accept(); patches resulting connection sockets
@@ -53,12 +61,12 @@ class NaughtySocketAcceptWrap(object):
             conn_wrap.unwrap()
 
     def arm(self):
             conn_wrap.unwrap()
 
     def arm(self):
-        print("ca-click")
+        output_buffer.append("ca-click")
         for i in self.conn_reg:
             i.arm()
 
     def __call__(self):
         for i in self.conn_reg:
             i.arm()
 
     def __call__(self):
-        print(self.__class__.__name__ + ".__call__")
+        output_buffer.append(self.__class__.__name__ + ".__call__")
         conn, addr = self.sock._really_accept()
         self.conn_reg.append(ExplodingConnectionWrap(conn))
         return conn, addr
         conn, addr = self.sock._really_accept()
         self.conn_reg.append(ExplodingConnectionWrap(conn))
         return conn, addr
@@ -81,12 +89,12 @@ class ExplodingConnectionWrap(object):
         del self.conn._really_makefile
 
     def arm(self):
         del self.conn._really_makefile
 
     def arm(self):
-        print("tick")
+        output_buffer.append("tick")
         for i in self.file_reg:
             i.arm()
 
     def __call__(self, mode='r', bufsize=-1):
         for i in self.file_reg:
             i.arm()
 
     def __call__(self, mode='r', bufsize=-1):
-        print(self.__class__.__name__ + ".__call__")
+        output_buffer.append(self.__class__.__name__ + ".__call__")
         # file_obj = self.conn._really_makefile(*args, **kwargs)
         file_obj = ExplodingSocketFile(self.conn._sock, mode, bufsize)
         self.file_reg.append(file_obj)
         # file_obj = self.conn._really_makefile(*args, **kwargs)
         file_obj = ExplodingSocketFile(self.conn._sock, mode, bufsize)
         self.file_reg.append(file_obj)
@@ -96,69 +104,88 @@ class ExplodingConnectionWrap(object):
 class ExplodingSocketFile(eventlet.greenio._fileobject):
 
     def __init__(self, sock, mode='rb', bufsize=-1, close=False):
 class ExplodingSocketFile(eventlet.greenio._fileobject):
 
     def __init__(self, sock, mode='rb', bufsize=-1, close=False):
-        super(self.__class__, self).__init__(sock, mode, bufsize, close)
+        args = [bufsize, close] if six.PY2 else []
+        super(self.__class__, self).__init__(sock, mode, *args)
         self.armed = False
 
     def arm(self):
         self.armed = False
 
     def arm(self):
-        print("beep")
+        output_buffer.append("beep")
         self.armed = True
 
     def _fuse(self):
         if self.armed:
         self.armed = True
 
     def _fuse(self):
         if self.armed:
-            print("=== ~* BOOM *~ ===")
+            output_buffer.append(TAG_BOOM)
             raise socket.timeout("timed out")
 
     def readline(self, *args, **kwargs):
             raise socket.timeout("timed out")
 
     def readline(self, *args, **kwargs):
-        print(self.__class__.__name__ + ".readline")
+        output_buffer.append(self.__class__.__name__ + ".readline")
         self._fuse()
         return super(self.__class__, self).readline(*args, **kwargs)
 
 
         self._fuse()
         return super(self.__class__, self).readline(*args, **kwargs)
 
 
-if __name__ == '__main__':
-    for debug in (False, True):
-        print("SEPERATOR_SENTINEL")
-        print("debug set to: %s" % debug)
-
-        server_sock = eventlet.listen(('localhost', 0))
-        server_addr = server_sock.getsockname()
-        sock_wrap = NaughtySocketAcceptWrap(server_sock)
-
-        eventlet.spawn_n(
-            eventlet.wsgi.server,
-            debug=debug,
-            log=sys.stdout,
-            max_size=128,
-            site=tests.wsgi_test.Site(),
-            sock=server_sock,
-        )
-
+def step(debug):
+    output_buffer[:] = []
+
+    server_sock = eventlet.listen(('localhost', 0))
+    server_addr = server_sock.getsockname()
+    sock_wrap = NaughtySocketAcceptWrap(server_sock)
+
+    eventlet.spawn_n(
+        eventlet.wsgi.server,
+        debug=debug,
+        log=BufferLog,
+        max_size=128,
+        site=tests.wsgi_test.Site(),
+        sock=server_sock,
+    )
+
+    try:
+        # req #1 - normal
+        sock1 = eventlet.connect(server_addr)
+        sock1.settimeout(0.1)
+        fd1 = sock1.makefile('rwb')
+        fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
+        fd1.flush()
+        tests.wsgi_test.read_http(sock1)
+
+        # let the server socket ops catch up, set bomb
+        eventlet.sleep(0)
+        output_buffer.append("arming...")
+        sock_wrap.arm()
+
+        # req #2 - old conn, post-arm - timeout
+        fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
+        fd1.flush()
         try:
         try:
-            # req #1 - normal
-            sock1 = eventlet.connect(server_addr)
-            sock1.settimeout(0.1)
-            fd1 = sock1.makefile('rwb')
-            fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
-            fd1.flush()
             tests.wsgi_test.read_http(sock1)
             tests.wsgi_test.read_http(sock1)
+            assert False, 'Expected ConnectionClosed exception'
+        except tests.wsgi_test.ConnectionClosed:
+            pass
+
+        fd1.close()
+        sock1.close()
+    finally:
+        # reset streams, then output trapped tracebacks
+        sock_wrap.unwrap()
+    # check output asserts in tests.wsgi_test.TestHttpd
+    # test_143_server_connection_timeout_exception
+
+    return output_buffer[:]
+
+
+def main():
+    output_normal = step(debug=False)
+    output_debug = step(debug=True)
+
+    assert "timed out" in output_debug[-1], repr(output_debug)
+    # if the BOOM check fails, it's because our timeout didn't happen
+    # (if eventlet stops using file.readline() to read HTTP headers,
+    # for instance)
+    assert TAG_BOOM == output_debug[-2], repr(output_debug)
+    assert TAG_BOOM == output_normal[-1], repr(output_normal)
+    assert "Traceback" not in output_debug, repr(output_debug)
+    assert "Traceback" not in output_normal, repr(output_normal)
+    print("pass")
 
 
-            # let the server socket ops catch up, set bomb
-            eventlet.sleep(0)
-            print("arming...")
-            sock_wrap.arm()
-
-            # req #2 - old conn, post-arm - timeout
-            fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
-            fd1.flush()
-            try:
-                tests.wsgi_test.read_http(sock1)
-                assert False, 'Expected ConnectionClosed exception'
-            except tests.wsgi_test.ConnectionClosed:
-                pass
-
-            fd1.close()
-            sock1.close()
-        finally:
-            # reset streams, then output trapped tracebacks
-            sock_wrap.unwrap()
-        # check output asserts in tests.wsgi_test.TestHttpd
-        # test_143_server_connection_timeout_exception
+if __name__ == '__main__':
+    main()
diff --git a/python-eventlet/tests/manual/__init__.py b/python-eventlet/tests/manual/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/python-eventlet/tests/manual/regress-226-unpatched-ssl.py b/python-eventlet/tests/manual/regress-226-unpatched-ssl.py
new file mode 100644 (file)
index 0000000..2d0d49c
--- /dev/null
@@ -0,0 +1,3 @@
+import eventlet
+import requests
+requests.get('https://www.google.com/').status_code
similarity index 99%
rename from eventlet/tests/mock.py
rename to python-eventlet/tests/mock.py
index 7bb567ac3149ed4640a8e64c1545d5443f063e62..34300217f0eab2540e6fe65fb6c7f30629a61bbe 100644 (file)
@@ -1172,7 +1172,7 @@ class _patch(object):
 
         @wraps(func)
         def patched(*args, **keywargs):
 
         @wraps(func)
         def patched(*args, **keywargs):
-            # don't use a with here (backwards compatability with Python 2.4)
+            # don't use a with here (backwards compatibility with Python 2.4)
             extra_args = []
             entered_patchers = []
 
             extra_args = []
             entered_patchers = []
 
similarity index 91%
rename from eventlet/tests/mysqldb_test.py
rename to python-eventlet/tests/mysqldb_test.py
index e5a87d3e739cb79597fe60a1c8255515f69de4c2..578de4e4c15b7b655ba1f37cd8a1b89cdc7a092f 100644 (file)
@@ -6,15 +6,12 @@ import traceback
 
 import eventlet
 from eventlet import event
 
 import eventlet
 from eventlet import event
-from tests import (
-    LimitedTestCase,
-    run_python,
-    skip_unless, using_pyevent, get_database_auth,
-)
 try:
     from eventlet.green import MySQLdb
 except ImportError:
     MySQLdb = False
 try:
     from eventlet.green import MySQLdb
 except ImportError:
     MySQLdb = False
+import tests
+from tests import skip_unless, using_pyevent, get_database_auth
 
 
 def mysql_requirement(_f):
 
 
 def mysql_requirement(_f):
@@ -40,7 +37,7 @@ def mysql_requirement(_f):
         return False
 
 
         return False
 
 
-class TestMySQLdb(LimitedTestCase):
+class TestMySQLdb(tests.LimitedTestCase):
     def setUp(self):
         self._auth = get_database_auth()['MySQLdb']
         self.create_db()
     def setUp(self):
         self._auth = get_database_auth()['MySQLdb']
         self.create_db()
@@ -229,16 +226,7 @@ class TestMySQLdb(LimitedTestCase):
             conn.commit()
 
 
             conn.commit()
 
 
-class TestMonkeyPatch(LimitedTestCase):
+class TestMonkeyPatch(tests.LimitedTestCase):
     @skip_unless(mysql_requirement)
     def test_monkey_patching(self):
     @skip_unless(mysql_requirement)
     def test_monkey_patching(self):
-        testcode_path = os.path.join(
-            os.path.dirname(os.path.abspath(__file__)),
-            'mysqldb_test_monkey_patch.py',
-        )
-        output = run_python(testcode_path)
-        lines = output.splitlines()
-        self.assertEqual(len(lines), 2, output)
-        self.assertEqual(lines[0].replace("psycopg,", ""),
-                         'mysqltest MySQLdb,os,select,socket,thread,time')
-        self.assertEqual(lines[1], "connect True")
+        tests.run_isolated('mysqldb_monkey_patch.py')
similarity index 96%
rename from eventlet/tests/patcher_test.py
rename to python-eventlet/tests/patcher_test.py
index 5c9076f4459192e8cd7b45ed7d9457a54473de82..2e458c557dff97cf73c412cf5d745df610b4f594 100644 (file)
@@ -4,15 +4,12 @@ import sys
 import tempfile
 
 from eventlet.support import six
 import tempfile
 
 from eventlet.support import six
-from tests import LimitedTestCase, main, run_python, skip_with_pyevent
+import tests
 
 
 base_module_contents = """
 import socket
 
 
 base_module_contents = """
 import socket
-try:
-    import urllib.request as urllib
-except ImportError:
-    import urllib
+import urllib
 print("base {0} {1}".format(socket, urllib))
 """
 
 print("base {0} {1}".format(socket, urllib))
 """
 
@@ -32,7 +29,7 @@ print("importing {0} {1} {2} {3}".format(patching, socket, patching.socket, patc
 """
 
 
 """
 
 
-class ProcessBase(LimitedTestCase):
+class ProcessBase(tests.LimitedTestCase):
     TEST_TIMEOUT = 3  # starting processes is time-consuming
 
     def setUp(self):
     TEST_TIMEOUT = 3  # starting processes is time-consuming
 
     def setUp(self):
@@ -54,7 +51,7 @@ class ProcessBase(LimitedTestCase):
 
     def launch_subprocess(self, filename):
         path = os.path.join(self.tempdir, filename)
 
     def launch_subprocess(self, filename):
         path = os.path.join(self.tempdir, filename)
-        output = run_python(path)
+        output = tests.run_python(path)
         if six.PY3:
             output = output.decode('utf-8')
             separator = '\n'
         if six.PY3:
             output = output.decode('utf-8')
             separator = '\n'
@@ -86,7 +83,14 @@ class ImportPatched(ProcessBase):
         assert 'eventlet.green.httplib' not in lines[2], repr(output)
 
     def test_import_patched_defaults(self):
         assert 'eventlet.green.httplib' not in lines[2], repr(output)
 
     def test_import_patched_defaults(self):
-        self.write_to_tempfile("base", base_module_contents)
+        self.write_to_tempfile("base", """
+import socket
+try:
+    import urllib.request as urllib
+except ImportError:
+    import urllib
+print("base {0} {1}".format(socket, urllib))""")
+
         new_mod = """
 from eventlet import patcher
 base = patcher.import_patched('base')
         new_mod = """
 from eventlet import patcher
 base = patcher.import_patched('base')
@@ -242,7 +246,7 @@ def test_monkey_patch_threading():
 class Tpool(ProcessBase):
     TEST_TIMEOUT = 3
 
 class Tpool(ProcessBase):
     TEST_TIMEOUT = 3
 
-    @skip_with_pyevent
+    @tests.skip_with_pyevent
     def test_simple(self):
         new_mod = """
 import eventlet
     def test_simple(self):
         new_mod = """
 import eventlet
@@ -260,7 +264,7 @@ tpool.killall()
         assert '2' in lines[0], repr(output)
         assert '3' in lines[1], repr(output)
 
         assert '2' in lines[0], repr(output)
         assert '3' in lines[1], repr(output)
 
-    @skip_with_pyevent
+    @tests.skip_with_pyevent
     def test_unpatched_thread(self):
         new_mod = """import eventlet
 eventlet.monkey_patch(time=False, thread=False)
     def test_unpatched_thread(self):
         new_mod = """import eventlet
 eventlet.monkey_patch(time=False, thread=False)
@@ -273,7 +277,7 @@ import time
         output, lines = self.launch_subprocess('newmod.py')
         self.assertEqual(len(lines), 2, lines)
 
         output, lines = self.launch_subprocess('newmod.py')
         self.assertEqual(len(lines), 2, lines)
 
-    @skip_with_pyevent
+    @tests.skip_with_pyevent
     def test_patched_thread(self):
         new_mod = """import eventlet
 eventlet.monkey_patch(time=False, thread=True)
     def test_patched_thread(self):
         new_mod = """import eventlet
 eventlet.monkey_patch(time=False, thread=True)
@@ -493,9 +497,12 @@ t2.join()
 
 
 def test_importlib_lock():
 
 
 def test_importlib_lock():
-    output = run_python('tests/patcher_test_importlib_lock.py')
-    assert output.rstrip() == b'ok'
+    tests.run_isolated('patcher_importlib_lock.py')
+
+
+def test_threading_condition():
+    tests.run_isolated('patcher_threading_condition.py')
 
 
 
 
-if __name__ == '__main__':
-    main()
+def test_threading_join():
+    tests.run_isolated('patcher_threading_join.py')
similarity index 74%
rename from eventlet/tests/semaphore_test.py
rename to python-eventlet/tests/semaphore_test.py
index 13163302480d06ae6d8ef7e07b3749aa570b2f4d..ced91364534fc7ee8424ff1a32b6f19369115dca 100644 (file)
@@ -45,5 +45,24 @@ class TestSemaphore(LimitedTestCase):
         self.assertRaises(ValueError, sem.acquire, blocking=False, timeout=1)
 
 
         self.assertRaises(ValueError, sem.acquire, blocking=False, timeout=1)
 
 
+def test_semaphore_contention():
+    g_mutex = semaphore.Semaphore()
+    counts = [0, 0]
+
+    def worker(no):
+        while min(counts) < 200:
+            with g_mutex:
+                counts[no - 1] += 1
+                eventlet.sleep(0.001)
+
+    t1 = eventlet.spawn(worker, no=1)
+    t2 = eventlet.spawn(worker, no=2)
+    eventlet.sleep(0.5)
+    t1.kill()
+    t2.kill()
+
+    assert abs(counts[0] - counts[1]) < int(min(counts) * 0.1), counts
+
+
 if __name__ == '__main__':
-    unittest.main()
+    unittest.main()
\ No newline at end of file
similarity index 72%
rename from eventlet/tests/subprocess_test.py
rename to python-eventlet/tests/subprocess_test.py
index 6964a742b85306172015789bbd5bacc6adb0de79..085f656c1e8695cafa04c545a7e010f446b2bb40 100644 (file)
@@ -1,7 +1,6 @@
 import eventlet
 from eventlet.green import subprocess
 import eventlet.patcher
-from nose.plugins.skip import SkipTest
 import sys
 import time
 original_subprocess = eventlet.patcher.original('subprocess')
@@ -19,7 +18,7 @@ def test_subprocess_wait():
     try:
         p.wait(timeout=0.1)
     except subprocess.TimeoutExpired as e:
-        str(e)  # make sure it doesnt throw
+        str(e)  # make sure it doesn't throw
         assert e.cmd == cmd
         assert e.timeout == 0.1
         ok = True
@@ -29,14 +28,15 @@ def test_subprocess_wait():
 
 
 def test_communicate_with_poll():
+    # This test was being skipped since git 25812fca8; I don't think there's
+    # a need to do this. The original comment:
+    #
     # https://github.com/eventlet/eventlet/pull/24
     # `eventlet.green.subprocess.Popen.communicate()` was broken
     # in Python 2.7 because the usage of the `select` module was moved from
     # `_communicate` into two other methods `_communicate_with_select`
     # and `_communicate_with_poll`. Link to 2.7's implementation:
     # http://hg.python.org/cpython/file/2145593d108d/Lib/subprocess.py#l1255
-    if getattr(original_subprocess.Popen, '_communicate_with_poll', None) is None:
-        raise SkipTest('original subprocess.Popen does not have _communicate_with_poll')
 
     p = subprocess.Popen(
         [sys.executable, '-c', 'import time; time.sleep(0.5)'],
@@ -45,3 +45,22 @@ def test_communicate_with_poll():
     eventlet.with_timeout(0.1, p.communicate, timeout_value=True)
     tdiff = time.time() - t1
     assert 0.1 <= tdiff <= 0.2, 'did not stop within allowed time'
+
+
+def test_close_popen_stdin_with_close_fds():
+    p = subprocess.Popen(
+        ['ls'],
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        close_fds=True,
+        shell=False,
+        cwd=None,
+        env=None)
+
+    p.communicate(None)
+
+    try:
+        p.stdin.close()
+    except Exception as e:
+        assert False, "Exception should not be raised, got %r instead" % e
similarity index 79%
rename from eventlet/tests/test__greenness.py
rename to python-eventlet/tests/test__greenness.py
index 7d9089089fada8a4624249ea1249c99facb29f32..a594b4da4d74738ec0d96f6276fd905283f477cc 100644 (file)
@@ -4,8 +4,15 @@ If either operation blocked the whole script would block and timeout.
 """
 import unittest
 
 """
 import unittest
 
-from eventlet.green import urllib2, BaseHTTPServer
+from eventlet.green import BaseHTTPServer
 from eventlet import spawn, kill
+from eventlet.support import six
+
+if six.PY2:
+    from eventlet.green.urllib2 import HTTPError, urlopen
+else:
+    from eventlet.green.urllib.request import urlopen
+    from eventlet.green.urllib.error import HTTPError
 
 
 class QuietHandler(BaseHTTPServer.BaseHTTPRequestHandler):
@@ -40,12 +47,12 @@ class TestGreenness(unittest.TestCase):
         self.server.server_close()
         kill(self.gthread)
 
-    def test_urllib2(self):
+    def test_urllib(self):
         self.assertEqual(self.server.request_count, 0)
         try:
-            urllib2.urlopen('http://127.0.0.1:%s' % self.port)
+            urlopen('http://127.0.0.1:%s' % self.port)
             assert False, 'should not get there'
-        except urllib2.HTTPError as ex:
+        except HTTPError as ex:
             assert ex.code == 501, repr(ex)
         self.assertEqual(self.server.request_count, 1)
 
similarity index 87%
rename from eventlet/tests/test__socket_errors.py
rename to python-eventlet/tests/test__socket_errors.py
index 7832de00ecccbc58c324c30a1966aa980a24f020..21fab406b62c56756770b87ed61d15f86ccf8d67 100644 (file)
@@ -52,5 +52,15 @@ class TestSocketErrors(unittest.TestCase):
             cs.close()
             server.close()
 
+
+def test_create_connection_refused():
+    errno = None
+    try:
+        socket.create_connection(('127.0.0.1', 0))
+    except socket.error as ex:
+        errno = ex.errno
+    assert errno in [111, 61, 10061], 'Expected socket.error ECONNREFUSED, got {0}'.format(errno)
+
+
 if __name__ == '__main__':
     unittest.main()
similarity index 96%
rename from eventlet/tests/tpool_test.py
rename to python-eventlet/tests/tpool_test.py
index 5af1a40d17159b8c71952916f0dfaa80390f973d..818bb4561ea1f67b0086aa08f4422944fdc7ffcc 100644 (file)
@@ -20,7 +20,7 @@ import re
 import time
 
 import eventlet
-from eventlet import tpool
+from eventlet import tpool, debug, event
 from eventlet.support import six
 from tests import LimitedTestCase, skipped, skip_with_pyevent, main
 
@@ -228,6 +228,22 @@ class TestTpool(LimitedTestCase):
         tpool.killall()
         tpool.setup()
 
+    @skip_with_pyevent
+    def test_killall_remaining_results(self):
+        semaphore = event.Event()
+
+        def native_fun():
+            time.sleep(.5)
+
+        def gt_fun():
+            semaphore.send(None)
+            tpool.execute(native_fun)
+
+        gt = eventlet.spawn(gt_fun)
+        semaphore.wait()
+        tpool.killall()
+        gt.wait()
+
     @skip_with_pyevent
     def test_autowrap(self):
         x = tpool.Proxy({'a': 1, 'b': 2}, autowrap=(int,))
similarity index 97%
rename from eventlet/tests/websocket_new_test.py
rename to python-eventlet/tests/websocket_new_test.py
index 95d36b507b3fbf625502b8a57dd7c4a4f4d9f39b..381d51cb3d6969f60eb8ae9544935ec46cb05886 100644 (file)
@@ -1,14 +1,11 @@
 import errno
 import struct
 
-from nose.tools import eq_
-
 import eventlet
 from eventlet import event
 from eventlet import websocket
 from eventlet.green import httplib
 from eventlet.green import socket
-from eventlet import websocket
 from eventlet.support import six
 
 from tests.wsgi_test import _TestBase
@@ -129,11 +126,11 @@ class TestWebSocket(_TestBase):
         sock.recv(1024)
         ws = websocket.RFC6455WebSocket(sock, {}, client=True)
         ws.send(b'hello')
-        eq_(ws.wait(), b'hello')
+        assert ws.wait() == b'hello'
         ws.send(b'hello world!\x01')
         ws.send(u'hello world again!')
-        eq_(ws.wait(), b'hello world!\x01')
-        eq_(ws.wait(), u'hello world again!')
+        assert ws.wait() == b'hello world!\x01'
+        assert ws.wait() == u'hello world again!'
         ws.close()
         eventlet.sleep(0.01)
 
similarity index 98%
rename from eventlet/tests/wsgi_test.py
rename to python-eventlet/tests/wsgi_test.py
index 179881df4f4c545fbf14aa3046abb2b705adf768..d5cea188f212505f28bc55b495b7a44a13b70019 100644 (file)
@@ -1330,11 +1330,24 @@ class TestHttpd(_TestBase):
         self.assertEqual(result.headers_lower['connection'], 'close')
         assert 'transfer-encoding' not in result.headers_lower
 
-    def test_unicode_raises_error(self):
+    def test_unicode_with_only_ascii_characters_works(self):
         def wsgi_app(environ, start_response):
             start_response("200 OK", [])
-            yield u"oh hai"
-            yield u"non-encodable unicode: \u0230"
+            yield b"oh hai, "
+            yield u"xxx"
+        self.site.application = wsgi_app
+        sock = eventlet.connect(('localhost', self.port))
+        fd = sock.makefile('rwb')
+        fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
+        fd.flush()
+        result = read_http(sock)
+        assert b'xxx' in result.body
+
+    def test_unicode_with_nonascii_characters_raises_error(self):
+        def wsgi_app(environ, start_response):
+            start_response("200 OK", [])
+            yield b"oh hai, "
+            yield u"xxx \u0230"
         self.site.application = wsgi_app
         sock = eventlet.connect(('localhost', self.port))
         fd = sock.makefile('rwb')
@@ -1343,7 +1356,6 @@ class TestHttpd(_TestBase):
         result = read_http(sock)
         self.assertEqual(result.status, 'HTTP/1.1 500 Internal Server Error')
         self.assertEqual(result.headers_lower['connection'], 'close')
-        assert b'unicode' in result.body
 
     def test_path_info_decoding(self):
         def wsgi_app(environ, start_response):
@@ -1443,22 +1455,7 @@ class TestHttpd(_TestBase):
         # Handle connection socket timeouts
         # https://bitbucket.org/eventlet/eventlet/issue/143/
         # Runs tests.wsgi_test_conntimeout in a separate process.
-        testcode_path = os.path.join(
-            os.path.dirname(os.path.abspath(__file__)),
-            'wsgi_test_conntimeout.py')
-        output = tests.run_python(testcode_path)
-        sections = output.split(b"SEPERATOR_SENTINEL")
-        # first section is empty
-        self.assertEqual(3, len(sections), output)
-        # if the "BOOM" check fails, it's because our timeout didn't happen
-        # (if eventlet stops using file.readline() to read HTTP headers,
-        # for instance)
-        for runlog in sections[1:]:
-            debug = False if "debug set to: False" in runlog else True
-            if debug:
-                self.assertTrue("timed out" in runlog)
-            self.assertTrue("BOOM" in runlog)
-            self.assertFalse("Traceback" in runlog)
+        tests.run_isolated('wsgi_connection_timeout.py')
 
     def test_server_socket_timeout(self):
         self.spawn_server(socket_timeout=0.1)
@@ -1743,7 +1740,16 @@ class TestChunkedInput(_TestBase):
             fd = self.connect()
             fd.sendall(req.encode())
             fd.close()
-            eventlet.sleep(0.0)
+
+            eventlet.sleep(0)
+
+            # This is needed because on Python 3 GreenSocket.recv_into is called
+            # rather than recv; recv_into right now (git 5ec3a3c) trampolines to
+            # the hub *before* attempting to read anything from a file descriptor,
+            # therefore we need one extra context switch to let it notice the
+            # closed socket, die and leave the hub empty.
+            if six.PY3:
+                eventlet.sleep(0)
         finally:
             signal.alarm(0)
             signal.signal(signal.SIGALRM, signal.SIG_DFL)
diff --git a/python-eventlet/tox.ini b/python-eventlet/tox.ini
new file mode 100644 (file)
index 0000000..e4f4e76
--- /dev/null
@@ -0,0 +1,55 @@
+# The flake8 and pep8 sections just contain configuration for corresponding tools.
+# Checkers are not run implicitly.
+[flake8]
+exclude = *.egg*,.env,.git,.hg,.tox,_*,build*,dist*,venv*,six.py,mock.py
+ignore = E261
+max-line-length = 101
+
+[pep8]
+count = 1
+exclude = *.egg*,.env,.git,.hg,.tox,_*,build*,dist*,venv*,six.py,mock.py
+ignore = E261
+max-line-length = 101
+show-source = 1
+statistics = 1
+
+[tox]
+minversion=1.8
+envlist =
+    pep8, py{26,27,33,34,py}-{selects,poll,epolls}, py{27,34,py}-dns
+
+[testenv:pep8]
+basepython = python2.7
+setenv =
+    {[testenv]setenv}
+deps =
+    pep8==1.5.6
+commands =
+    pep8 benchmarks/ eventlet/ tests/
+
+[testenv]
+downloadcache = {toxworkdir}/pip_download_cache
+setenv =
+    PYTHONDONTWRITEBYTECODE = 1
+    selects: EVENTLET_HUB = selects
+    poll: EVENTLET_HUB = poll
+    epolls: EVENTLET_HUB = epolls
+basepython =
+    py26: python2.6
+    py27: python2.7
+    py33: python3.3
+    py34: python3.4
+    pypy: pypy
+deps =
+    nose==1.3.1
+    setuptools==5.4.1
+    py27-dns: dnspython==1.12.0
+    py{26,27}-{selects,poll,epolls}: MySQL-python==1.2.5
+    py{34,py}-dns: dnspython3==1.12.0
+    {selects,poll,epolls}: psycopg2cffi-compat==1.1
+    {selects,poll,epolls}: pyopenssl==0.13
+    {selects,poll,epolls}: pyzmq==13.1.0
+commands =
+    nosetests --verbose {posargs:tests/}
+    nosetests --verbose --with-doctest eventlet/coros.py eventlet/event.py \
+        eventlet/pools.py eventlet/queue.py eventlet/timeout.py