From 3dbfedbaa1a106967b7588f6ce50b89788837a33 Mon Sep 17 00:00:00 2001
From: Ivan Udovichenko
Date: Fri, 4 Mar 2016 18:30:28 +0200
Subject: [PATCH] Add python-eventlet package to MOS 9.0 repository
Change-Id: I34be69b0bce9526e64119f468b60cba2c5ec2aa1
Version: 0.18.4-1~u14.04+mos1
Source: http://http.debian.net/debian/pool/main/p/python-eventlet/python-eventlet_0.18.4-1.dsc
---
debian/changelog | 26 +
debian/control | 13 +-
debian/gbp.conf | 2 +-
...d_was_running_empty_loop_on_ENOTCONN.patch | 29 -
...self.assert-in-tests.patcher_test.py.patch | 16 +-
debian/patches/series | 1 -
.../set-defaults-to-be-tlsv1-not-sslv23.patch | 6 +-
...ged-python-mock-rather-than-embedded.patch | 25 +-
debian/python-eventlet.lintian-overrides | 1 -
debian/rules | 3 +
debian/tests/control | 3 +
debian/tests/listen | 5 +
debian/tests/listen3 | 5 +
python-eventlet/AUTHORS | 13 +-
python-eventlet/NEWS | 80 +++
python-eventlet/bin/build-website.bash | 5 +-
python-eventlet/bin/release | 134 ++++-
python-eventlet/doc/Makefile | 8 +-
python-eventlet/doc/design_patterns.rst | 2 +-
python-eventlet/doc/real_index.html | 10 +-
python-eventlet/eventlet/__init__.py | 2 +-
python-eventlet/eventlet/backdoor.py | 32 +-
.../eventlet/green/OpenSSL/__init__.py | 10 +-
python-eventlet/eventlet/green/select.py | 3 +
python-eventlet/eventlet/green/selectors.py | 23 +
python-eventlet/eventlet/green/ssl.py | 36 +-
python-eventlet/eventlet/green/subprocess.py | 20 +-
python-eventlet/eventlet/greenio/base.py | 89 +--
python-eventlet/eventlet/greenio/py2.py | 2 +-
python-eventlet/eventlet/greenio/py3.py | 33 +-
python-eventlet/eventlet/patcher.py | 46 +-
python-eventlet/eventlet/queue.py | 16 +-
python-eventlet/eventlet/tpool.py | 47 +-
python-eventlet/eventlet/websocket.py | 2 +-
python-eventlet/eventlet/wsgi.py | 111 ++--
python-eventlet/tests/__init__.py | 37 +-
python-eventlet/tests/backdoor_test.py | 30 +-
python-eventlet/tests/db_pool_test.py | 146 +----
python-eventlet/tests/env_test.py | 140 ++---
python-eventlet/tests/greenio_test.py | 335 ++++++-----
.../tests/greenpipe_test_with_statement.py | 25 -
.../tests/isolated/env_tpool_negative.py | 11 +
.../tests/isolated/env_tpool_size.py | 26 +
.../tests/isolated/env_tpool_zero.py | 22 +
.../isolated/greendns_from_address_203.py | 9 +-
.../isolated/greenio_double_close_219.py | 6 +-
.../tests/isolated/mysqldb_monkey_patch.py | 3 -
...her_blocking_select_methods_are_deleted.py | 34 ++
.../tests/isolated/patcher_importlib_lock.py | 11 +-
.../patcher_socketserver_selectors.py | 30 +
.../isolated/patcher_threading_condition.py | 5 +-
.../tests/isolated/patcher_threading_join.py | 5 +-
.../subprocess_patched_communicate.py | 11 +
.../tests/isolated/wsgi_connection_timeout.py | 1 +
python-eventlet/tests/openssl_test.py | 17 +
python-eventlet/tests/patcher_test.py | 34 +-
python-eventlet/tests/semaphore_test.py | 2 +-
python-eventlet/tests/socket_test.py | 23 +
python-eventlet/tests/subprocess_test.py | 23 +-
python-eventlet/tests/thread_test.py | 25 +-
python-eventlet/tests/tpool_test.py | 24 +-
python-eventlet/tests/websocket_new_test.py | 57 +-
python-eventlet/tests/websocket_test.py | 172 +++---
python-eventlet/tests/wsgi_test.py | 543 +++++++++---------
python-eventlet/tox.ini | 2 +-
65 files changed, 1497 insertions(+), 1171 deletions(-)
delete mode 100644 debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch
delete mode 100644 debian/python-eventlet.lintian-overrides
create mode 100644 debian/tests/control
create mode 100755 debian/tests/listen
create mode 100755 debian/tests/listen3
delete mode 100644 python-eventlet/tests/greenpipe_test_with_statement.py
create mode 100644 python-eventlet/tests/isolated/env_tpool_negative.py
create mode 100644 python-eventlet/tests/isolated/env_tpool_size.py
create mode 100644 python-eventlet/tests/isolated/env_tpool_zero.py
create mode 100644 python-eventlet/tests/isolated/patcher_blocking_select_methods_are_deleted.py
create mode 100644 python-eventlet/tests/isolated/patcher_socketserver_selectors.py
create mode 100644 python-eventlet/tests/isolated/subprocess_patched_communicate.py
create mode 100644 python-eventlet/tests/openssl_test.py
diff --git a/debian/changelog b/debian/changelog
index 2eee87a..28547b1 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,29 @@
+python-eventlet (0.18.4-1~u14.04+mos1) mos9.0; urgency=medium
+
+ * Source: http://http.debian.net/debian/pool/main/p/python-eventlet/python-eventlet_0.18.4-1.dsc
+
+ -- Ivan Udovichenko Fri, 04 Mar 2016 18:29:23 +0200
+
+python-eventlet (0.18.4-1) experimental; urgency=medium
+
+ [ Ondřej Nový ]
+ * Removed greenio_send_was_running_empty_loop_on_ENOTCONN.patch
+ (Applied upstream)
+ * Rebased use-packaged-python-mock-rather-than-embedded.patch
+ * Rebased set-defaults-to-be-tlsv1-not-sslv23.patch
+ * Rebased remove-self.assert-in-tests.patcher_test.py.patch
+ * Added python(3)-dnspython build dependency.
+ * Fixed VCS URLs (https).
+ * Standards-Version is 3.9.7 now (no change).
+ * Fixed upstream changelog.
+ * Added Debian tests.
+ * Added myself as uploader.
+
+ [ Corey Bryant ]
+ * New upstream release.
+
+ -- Ondřej Nový Fri, 26 Feb 2016 21:44:11 +0100
+
python-eventlet (0.17.4-2~u14.04+mos1) mos8.0; urgency=medium
* Source: http://http.debian.net/debian/pool/main/p/python-eventlet/python-eventlet_0.17.4-2.dsc
diff --git a/debian/control b/debian/control
index 821eb1a..d6c80e7 100644
--- a/debian/control
+++ b/debian/control
@@ -2,8 +2,9 @@ Source: python-eventlet
Section: python
Priority: optional
Maintainer: PKG OpenStack
-Uploaders: Laszlo Boszormenyi (GCS) ,
+Uploaders: Laszlo Boszormenyi (GCS) ,
Thomas Goirand ,
+ Ondřej Nový ,
Build-Depends: debhelper (>= 9),
dh-python,
openstack-pkg-tools,
@@ -12,21 +13,23 @@ Build-Depends: debhelper (>= 9),
python-sphinx,
python3-all,
python3-setuptools,
-Build-Depends-Indep: python-greenlet,
+Build-Depends-Indep: python-dnspython,
+ python-greenlet,
python-httplib2,
python-mock,
python-nose,
python-openssl,
python-zmq,
+ python3-dnspython,
python3-greenlet,
python3-httplib2,
python3-mock,
python3-nose,
python3-openssl,
python3-zmq,
-Standards-Version: 3.9.6
-Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/python-eventlet.git;a=summary
-Vcs-Git: git://anonscm.debian.org/openstack/python-eventlet.git
+Standards-Version: 3.9.7
+Vcs-Browser: https://anonscm.debian.org/cgit/openstack/python-eventlet.git/
+Vcs-Git: https://anonscm.debian.org/git/openstack/python-eventlet.git
Homepage: http://eventlet.net
Package: python-eventlet
diff --git a/debian/gbp.conf b/debian/gbp.conf
index 7bf5959..017bb3c 100644
--- a/debian/gbp.conf
+++ b/debian/gbp.conf
@@ -1,6 +1,6 @@
[DEFAULT]
upstream-branch = master
-debian-branch = debian/unstable
+debian-branch = debian/experimental
upstream-tag = %(version)s
compression = xz
diff --git a/debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch b/debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch
deleted file mode 100644
index 040c109..0000000
--- a/debian/patches/greenio_send_was_running_empty_loop_on_ENOTCONN.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Description: greenio: send() was running empty loop on ENOTCONN
- Thanks to Seyeong Kim
- https://github.com/eventlet/eventlet/issues/192
-Author: Sergey Shepelev
-Date: Fri, 15 May 2015 03:56:04 +0300
-
-diff --git a/AUTHORS b/AUTHORS
-index e0ab0e2..c57f010 100644
---- a/AUTHORS
-+++ b/AUTHORS
-@@ -119,3 +119,4 @@ Thanks To
- * Sean Dague, wsgi: Provide python logging compatibility
- * Tim Simmons, Use _socket_nodns and select in dnspython support
- * Antonio Cuni, fix fd double close on PyPy
-+* Seyeong Kim
-diff --git a/eventlet/greenio/base.py b/eventlet/greenio/base.py
-index 8da51ca..1e43176 100644
---- a/eventlet/greenio/base.py
-+++ b/eventlet/greenio/base.py
-@@ -358,7 +358,8 @@ def send(self, data, flags=0):
- try:
- total_sent += fd.send(data[total_sent:], flags)
- except socket.error as e:
-- if get_errno(e) not in SOCKET_BLOCKING:
-+ eno = get_errno(e)
-+ if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
- raise
-
- if total_sent == len_data:
diff --git a/debian/patches/remove-self.assert-in-tests.patcher_test.py.patch b/debian/patches/remove-self.assert-in-tests.patcher_test.py.patch
index 2946ca1..2157ca5 100644
--- a/debian/patches/remove-self.assert-in-tests.patcher_test.py.patch
+++ b/debian/patches/remove-self.assert-in-tests.patcher_test.py.patch
@@ -3,20 +3,18 @@ Author: Thomas Goirand
Forwarded: no
Last-Update: 2014-09-07
-Index: python-eventlet-0.17.3/tests/patcher_test.py
-===================================================================
---- python-eventlet-0.17.3.orig/tests/patcher_test.py
-+++ python-eventlet-0.17.3/tests/patcher_test.py
-@@ -325,7 +325,7 @@ print(len(_threading._active))
+--- a/tests/patcher_test.py
++++ b/tests/patcher_test.py
+@@ -327,7 +327,7 @@
self.assertEqual(len(lines), 4, "\n".join(lines))
assert lines[0].startswith('
Forwarded: no
Last-Update: 2015-05-21
---- python-eventlet-0.17.3.orig/eventlet/green/ssl.py
-+++ python-eventlet-0.17.3/eventlet/green/ssl.py
-@@ -46,7 +46,7 @@ class GreenSSLSocket(_original_sslsocket
+--- a/eventlet/green/ssl.py
++++ b/eventlet/green/ssl.py
+@@ -48,7 +48,7 @@
def __init__(self, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
diff --git a/debian/patches/use-packaged-python-mock-rather-than-embedded.patch b/debian/patches/use-packaged-python-mock-rather-than-embedded.patch
index 41654e8..12a3bad 100644
--- a/debian/patches/use-packaged-python-mock-rather-than-embedded.patch
+++ b/debian/patches/use-packaged-python-mock-rather-than-embedded.patch
@@ -4,27 +4,26 @@ Author: Thomas Goirand
Forwarded: no
Last-Update: 2015-02-08
---- python-eventlet-0.16.1.orig/tests/db_pool_test.py
-+++ python-eventlet-0.16.1/tests/db_pool_test.py
-@@ -7,7 +7,8 @@ import os
+--- a/tests/db_pool_test.py
++++ b/tests/db_pool_test.py
+@@ -7,7 +7,8 @@
import traceback
from unittest import TestCase, main
--from tests import mock, skipped, skip_unless, skip_with_pyevent, get_database_auth
+-from tests import mock, skip_unless, skip_with_pyevent, get_database_auth
+import mock
-+from tests import skipped, skip_unless, skip_with_pyevent, get_database_auth
++from tests import skip_unless, skip_with_pyevent, get_database_auth
from eventlet import event
from eventlet import db_pool
from eventlet.support import six
---- python-eventlet-0.16.1.orig/tests/websocket_test.py
-+++ python-eventlet-0.16.1/tests/websocket_test.py
-@@ -8,7 +8,8 @@ from eventlet.green import httplib
- from eventlet.support import six
+--- a/tests/websocket_test.py
++++ b/tests/websocket_test.py
+@@ -9,7 +9,7 @@
from eventlet.websocket import WebSocket, WebSocketWSGI
--from tests import certificate_file, LimitedTestCase, mock, private_key_file
+ import tests
+-from tests import mock
+import mock
-+from tests import certificate_file, LimitedTestCase, private_key_file
- from tests import skip_if_no_ssl
- from tests.wsgi_test import _TestBase
+ import tests.wsgi_test
+
diff --git a/debian/python-eventlet.lintian-overrides b/debian/python-eventlet.lintian-overrides
deleted file mode 100644
index 9885900..0000000
--- a/debian/python-eventlet.lintian-overrides
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet: no-upstream-changelog
diff --git a/debian/rules b/debian/rules
index 12e908d..f0a1aef 100755
--- a/debian/rules
+++ b/debian/rules
@@ -37,3 +37,6 @@ override_dh_compress:
override_dh_clean:
dh_clean -O--buildsystem=python_distutils
rm -rf build
+
+override_dh_installchangelogs:
+ dh_installchangelogs NEWS
diff --git a/debian/tests/control b/debian/tests/control
new file mode 100644
index 0000000..68bb763
--- /dev/null
+++ b/debian/tests/control
@@ -0,0 +1,3 @@
+Tests: listen listen3
+Depends: python-eventlet,
+ python3-eventlet,
diff --git a/debian/tests/listen b/debian/tests/listen
new file mode 100755
index 0000000..e6e1145
--- /dev/null
+++ b/debian/tests/listen
@@ -0,0 +1,5 @@
+#!/usr/bin/python
+
+import eventlet
+
+eventlet.listen(('localhost', 7000))
diff --git a/debian/tests/listen3 b/debian/tests/listen3
new file mode 100755
index 0000000..76179e0
--- /dev/null
+++ b/debian/tests/listen3
@@ -0,0 +1,5 @@
+#!/usr/bin/python3
+
+import eventlet
+
+eventlet.listen(('localhost', 7000))
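Note: the two autopkgtest scripts above only verify that eventlet.listen() can bind a socket under Python 2 and Python 3. For illustration only (not part of this package's tests), a fuller smoke test might also exercise a green client/server round-trip; the sketch below uses only documented eventlet APIs and a hypothetical free port.

    import eventlet
    from eventlet.green import socket

    def smoke_test(port=7001):
        # Bind a green listening socket, as debian/tests/listen* do.
        server = eventlet.listen(('localhost', port))

        def serve_once():
            # Accept a single connection and echo what was received.
            client_sock, _addr = server.accept()
            client_sock.sendall(client_sock.recv(1024))
            client_sock.close()

        eventlet.spawn(serve_once)
        client = socket.socket()
        client.connect(('localhost', port))
        client.sendall(b'ping')
        assert client.recv(1024) == b'ping'
        client.close()

    if __name__ == '__main__':
        smoke_test()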
diff --git a/python-eventlet/AUTHORS b/python-eventlet/AUTHORS
index e0ab0e2..69e38c5 100644
--- a/python-eventlet/AUTHORS
+++ b/python-eventlet/AUTHORS
@@ -39,6 +39,8 @@ Contributors
* Victor Sergeyev
* David Szotten
* Victor Stinner
+* Samuel Merritt
+* Eric Urban
Linden Lab Contributors
-----------------------
@@ -91,7 +93,6 @@ Thanks To
* Peter Skirko, fixing socket.settimeout(0) bug
* Derk Tegeler, Pre-cache proxied GreenSocket methods (Bitbucket #136)
* David Malcolm, optional "timeout" argument to the subprocess module (Bitbucket #89)
-* Eric Urban, fix wsgi.input 1-byte (Bitbucket #150)
* David Goetz, wsgi: Allow minimum_chunk_size to be overriden on a per request basis
* Dmitry Orlov, websocket: accept Upgrade: websocket (lowercase)
* Zhang Hua, profile: accumulate results between runs (Bitbucket #162)
@@ -119,3 +120,13 @@ Thanks To
* Sean Dague, wsgi: Provide python logging compatibility
* Tim Simmons, Use _socket_nodns and select in dnspython support
* Antonio Cuni, fix fd double close on PyPy
+* Seyeong Kim
+* Ihar Hrachyshka
+* Janusz Harkot
+* Fukuchi Daisuke
+* Ramakrishnan G
+* ashutosh-mishra
+* Azhar Hussain
+* Josh VanderLinden
+* Levente Polyak
+* Phus Lu
diff --git a/python-eventlet/NEWS b/python-eventlet/NEWS
index 4e8df12..eb0ed9a 100644
--- a/python-eventlet/NEWS
+++ b/python-eventlet/NEWS
@@ -1,3 +1,83 @@
+0.18.4
+======
+* wsgi: change TCP_NODELAY to TCP_QUICKACK, ignore socket error when not available
+
+0.18.3
+======
+* wsgi: Use buffered writes - fixes partial socket.send without custom
+ writelines(); Github issue #295
+* wsgi: TCP_NODELAY enabled by default
+
+0.18.2
+======
+* wsgi: Fix data loss on partial writes (socket.send); Thanks to Jakub Stasiak
+
+0.18.1
+======
+* IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1
+* patcher: Fix AttributeError in subprocess communicate()
+* greenio: Fix "TypeError: an integer is required" in sendto()
+
+0.18.0
+======
+* IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1
+* greenio: Fixed a bug that could cause send() to start an endless loop on
+ ENOTCONN; Thanks to Seyeong Kim
+* wsgi: Fixed UNIX socket address being trimmed in "wsgi starting" log; Thanks
+ to Ihar Hrachyshka
+* ssl: Ported eventlet.green.OpenSSL to Python 3; Thanks to Victor Stinner
+* greenio: Made read() support buflen=-1 and added readall() (Python 3);
+ Thanks to David Szotten
+* wsgi: Made the error raised in case of chunk read failures more precise (this
+ should be backwards compatible as the new exception class,
+ wsgi.ChunkReadError, is a subclass of ValueError which was being used there
+ before); Thanks to Samuel Merritt
+* greenio: Fixed socket.recv() sometimes returning str instead of bytes on
+ Python 3; Thanks to Janusz Harkot
+* wsgi: Improved request body discarding
+* websocket: Fixed TypeError on empty websocket message (Python 3); Thanks to
+ Fukuchi Daisuke
+* subprocess: Fixed universal_newlines support
+* wsgi: Output of 0-byte chunks is now suppressed; Thanks to Samuel Merritt
+* Improved the documentation; Thanks to Ramakrishnan G, ashutosh-mishra and
+ Azhar Hussain
+* greenio: Changed GreenFileIO.write() (Python 3) to always write all data to
+ match the behavior on Python 2; Thanks to Victor Stinner
+* subprocess: Fixed missing subprocess.mswindows attribute on Python 3.5;
+ Thanks to Josh VanderLinden
+* ssl/monkey patching: Fixed a bug that would cause merely importing eventlet
+ to monkey patch the ssl module; Thanks to David Szotten
+* documentation: Added support for building plain text documentation; thanks
+ to Levente Polyak
+* greenio: Fixed handling blocking IO errors in various GreenSocket methods;
+ Thanks to Victor Stinner
+* greenio: Fixed GreenPipe ignoring the bufsize parameter on Python 2; Thanks
+ to Phus Lu
+* backdoor: Added Unix and IPv6 socket support; Thanks to Eric Urban
+
+Backwards incompatible:
+
+* monkey patching: The following select methods and selector classes are now
+ removed, instead of being left in their respective modules after patching
+ even though they are not green (this also fixes HTTPServer.serve_forever()
+ blocking whole process on Python 3):
+
+ * select.poll
+ * select.epoll
+ * select.devpoll
+ * select.kqueue
+ * select.kevent
+ * selectors.PollSelector
+ * selectors.EpollSelector
+ * selectors.DevpollSelector
+ * selectors.KqueueSelector
+
+ Additionally selectors.DefaultSelector points to a green SelectSelector
+
+* greenio: Fixed send() to no longer behave like sendall() which makes it
+ consistent with Python standard library and removes a source of very subtle
+ errors
+
0.17.4
======
* ssl: incorrect initalization of default context; Thanks to stuart-mclaren
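Of the NEWS entries above, the send()/sendall() change is the one most likely to affect callers: since 0.18, GreenSocket.send() matches the standard library and may write only part of the buffer instead of looping internally. A minimal caller-side sketch of the consequence (illustrative only, not taken from the patch):

    from eventlet.green import socket

    def send_all(sock, data):
        # Post-0.18 pattern: send() may accept only part of `data`,
        # so loop on the returned count (or simply call sock.sendall()).
        sent = 0
        while sent < len(data):
            sent += sock.send(data[sent:])
        return sent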
diff --git a/python-eventlet/bin/build-website.bash b/python-eventlet/bin/build-website.bash
index 8f461e0..6fd4e01 100755
--- a/python-eventlet/bin/build-website.bash
+++ b/python-eventlet/bin/build-website.bash
@@ -53,8 +53,7 @@ rm -f "doc/changelog.rst"
if [ $commit -eq 1 ]; then
echo "3. Updating git branch gh-pages"
- source_name=`git rev-parse --abbrev-ref HEAD`
- source_id=`git rev-parse --short HEAD`
+ source_name=`git describe --dirty --tags`
git branch --track gh-pages origin/gh-pages || true
git checkout gh-pages
git ls-files |grep -Ev '^.gitignore$' |xargs rm -f
@@ -70,5 +69,5 @@ if [ $commit -eq 1 ]; then
git status
read -p "Carefully read git status output above, press Enter to continue or Ctrl+C to abort"
- git commit --edit -m "Website built from $source_name $source_id"
+ git commit --edit -m "Website built from $source_name"
fi
diff --git a/python-eventlet/bin/release b/python-eventlet/bin/release
index 6480e7a..49d1082 100755
--- a/python-eventlet/bin/release
+++ b/python-eventlet/bin/release
@@ -3,46 +3,158 @@ cd "$( dirname "${BASH_SOURCE[0]}" )/.."
if [[ ! -d venv-release ]]; then
virtualenv venv-release
echo '*' >venv-release/.gitignore
- venv-release/bin/pip install wheel sphinx
+ venv-release/bin/pip install -U pip setuptools sphinx wheel
fi
. $PWD/venv-release/bin/activate
pip install -e $PWD
+version=
+version_next=
+
main() {
branch="${1-$(git symbolic-ref --short HEAD)}"
version="$(python -c 'import eventlet; print(eventlet.__version__)')"
- printf "branch: %s version: '%s'\n" $branch $version >&2
+ printf "\nbranch: %s eventlet.__version__: '%s'\n" $branch $version >&2
if [[ "$branch" != "master" ]]; then
echo "Must be on master" >&2
exit 1
fi
if [[ -n "$(git status --short -uall)" ]]; then
- echo "Tree must be clean" >&2
+ echo "Tree must be clean. git status:" >&2
+ echo "" >&2
+ git status --short -uall
+ echo "" >&2
exit 1
fi
+ last_commit_message=$(git show --format="%s" --no-patch HEAD)
+ expect_commit_message="v$version release"
+ if [[ "$last_commit_message" != "$expect_commit_message" ]]; then
+ printf "Last commit message: '%s' expected: '%s'\n" "$last_commit_message" "$expect_commit_message" >&2
+ if confirm "Create release commit? [yN] "; then
+ create_commit
+ elif ! confirm "Continue without proper release commit? [yN] "; then
+ exit 1
+ fi
+ fi
confirm "Continue? [yN] " || exit 1
+ echo "Creating tag v$version" >&2
if ! git tag "v$version"; then
- echo "tag failed" >&2
+ echo "git tag failed " >&2
confirm "Continue still? [yN] " || exit 1
fi
+ if confirm "Build documentation (website)? [Yn] " >&2; then
+ bin/build-website.bash || exit 1
+ fi
+
if confirm "Upload to PyPi? [Yn] "; then
rm -rf build dist
- python setup.py sdist bdist_wheel register upload
+ python setup.py sdist bdist_wheel register upload || exit 1
fi
- bin/build-website.bash
-
- git push origin master
+ git push --verbose origin master gh-pages || exit 1
git push --tags
- git push origin gh-pages
+}
+
+create_commit() {
+ echo "" >&2
+ echo "Plan:" >&2
+ echo "1. bump version" >&2
+ echo "2. update NEWS, AUTHORS" >&2
+ echo "3. commit" >&2
+ echo "4. run bin/release again" >&2
+ echo "" >&2
+
+ bump_version
+ edit_news
+
+ git diff
+ confirm "Ready to commit? [Yn] " || exit 1
+ git commit -a -m "v$version_next release"
+
+ echo "Re-exec $0 to continue" >&2
+ exec $0
+}
+
+bump_version() {
+ local current=$version
+ echo "Current version: '$current'" >&2
+ echo -n "Enter next version (empty to abort): " >&2
+ read version_next
+ if [[ -z "$version_next" ]]; then
+ exit 1
+ fi
+ echo "Next version: '$version_next'" >&2
+
+ local current_tuple="${current//./, }"
+ local next_tuple="${version_next//./, }"
+ local version_path="eventlet/__init__.py"
+ echo "Updating file '$version_path'" >&2
+ if ! sed -i '' -e "s/($current_tuple)/($next_tuple)/" "$version_path"; then
+ echo "sed error $?" >&2
+ exit 1
+ fi
+ if git diff --exit-code "$version_path"; then
+ echo "File '$version_path' is not modified" >&2
+ exit 1
+ fi
+ echo "" >&2
+
+ local doc_path="doc/real_index.html"
+ echo "Updating file '$doc_path'" >&2
+ if ! sed -i '' -e "s/$current/$version_next/g" "$doc_path"; then
+ echo "sed error $?" >&2
+ exit 1
+ fi
+ if git diff --exit-code "$doc_path"; then
+ echo "File '$doc_path' is not modified" >&2
+ exit 1
+ fi
+ echo "" >&2
+
+ confirm "Confirm changes? [yN] " || exit 1
+}
+
+edit_news() {
+ echo "Changes since last release:" >&2
+ git log --format='%h %an %s' "v$version"^.. -- || exit 1
+ echo "" >&2
+
+ local editor=$(which edit 2>/dev/null)
+ [[ -z "$editor" ]] && editor="$EDITOR"
+ if [[ -n "$editor" ]]; then
+ if confirm "Open default editor for NEWS and AUTHORS? [Yn] "; then
+ $editor NEWS
+ $editor AUTHORS
+ else
+ confirm "Change files NEWS and AUTHORS manually and press any key"
+ fi
+ else
+ echo "Unable to determine default text editor." >&2
+ confirm "Change files NEWS and AUTHORS manually and press any key"
+ fi
+ echo "" >&2
+
+ if git diff --exit-code NEWS AUTHORS; then
+ echo "Files NEWS and AUTHORS are not modified" >&2
+ exit 1
+ fi
+ echo "" >&2
+
+ confirm "Confirm changes? [yN] " || exit 1
}
confirm() {
- read -n1 -p "$1" reply
- echo ""
+ local reply
+ local prompt="$1"
+ read -n1 -p "$prompt" reply >&2
+ echo "" >&2
rc=0
+ local default_y=" \[Yn\] $"
+ if [[ -z "$reply" ]] && [[ "$prompt" =~ $default_y ]]; then
+ reply="y"
+ fi
[[ "$reply" != "y" ]] && rc=1
return $rc
}
diff --git a/python-eventlet/doc/Makefile b/python-eventlet/doc/Makefile
index 076db3a..64f0134 100644
--- a/python-eventlet/doc/Makefile
+++ b/python-eventlet/doc/Makefile
@@ -11,10 +11,11 @@ PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+.PHONY: help clean text html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
help:
@echo "Please use \`make ' where is one of"
+ @echo " text to make text files"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " pickle to make pickle files"
@@ -30,6 +31,11 @@ help:
clean:
-rm -rf _build/*
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) _build/text
+ @echo
+ @echo "Build finished. The text files are in _build/text."
+
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html
@echo
diff --git a/python-eventlet/doc/design_patterns.rst b/python-eventlet/doc/design_patterns.rst
index f27f37d..0f84409 100644
--- a/python-eventlet/doc/design_patterns.rst
+++ b/python-eventlet/doc/design_patterns.rst
@@ -14,7 +14,7 @@ The canonical client-side example is a web crawler. This use case is given a li
from eventlet.green import urllib2
urls = ["http://www.google.com/intl/en_ALL/images/logo.gif",
- "https://wiki.secondlife.com/w/images/secondlife.jpg",
+ "https://www.python.org/static/img/python-logo.png",
"http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"]
def fetch(url):
diff --git a/python-eventlet/doc/real_index.html b/python-eventlet/doc/real_index.html
index d9a7d60..22a0ba0 100644
--- a/python-eventlet/doc/real_index.html
+++ b/python-eventlet/doc/real_index.html
@@ -1,8 +1,8 @@
-
+
-
+
Eventlet Networking Library
@@ -46,15 +46,15 @@
Installation
-To install eventlet, simply:
+
To install Eventlet, simply:
pip install eventlet
-Alternately, you can download the source tarball:
+Alternately, you can download the source archive:
diff --git a/python-eventlet/eventlet/__init__.py b/python-eventlet/eventlet/__init__.py
index 1bdcf52..444e37f 100644
--- a/python-eventlet/eventlet/__init__.py
+++ b/python-eventlet/eventlet/__init__.py
@@ -1,4 +1,4 @@
-version_info = (0, 17, 4)
+version_info = (0, 18, 4)
__version__ = '.'.join(map(str, version_info))
try:
diff --git a/python-eventlet/eventlet/backdoor.py b/python-eventlet/eventlet/backdoor.py
index 2067772..9a6797a 100644
--- a/python-eventlet/eventlet/backdoor.py
+++ b/python-eventlet/eventlet/backdoor.py
@@ -4,6 +4,8 @@ from code import InteractiveConsole
import errno
import socket
import sys
+import errno
+import traceback
import eventlet
from eventlet import hubs
@@ -69,7 +71,12 @@ class SocketConsole(greenlets.greenlet):
def finalize(self):
# restore the state of the socket
self.desc = None
- print("backdoor closed to %s:%s" % self.hostport)
+ if len(self.hostport) >= 2:
+ host = self.hostport[0]
+ port = self.hostport[1]
+ print("backdoor closed to %s:%s" % (host, port,))
+ else:
+ print('backdoor closed')
def backdoor_server(sock, locals=None):
@@ -81,7 +88,16 @@ def backdoor_server(sock, locals=None):
of the interpreters. It can be convenient to stick important application
variables in here.
"""
- print("backdoor server listening on %s:%s" % sock.getsockname())
+ listening_on = sock.getsockname()
+ if sock.family == socket.AF_INET:
+ # Expand result to IP + port
+ listening_on = '%s:%s' % listening_on
+ elif sock.family == socket.AF_INET6:
+ ip, port, _, _ = listening_on
+ listening_on = '%s:%s' % (ip, port,)
+ # No action needed if sock.family == socket.AF_UNIX
+
+ print("backdoor server listening on %s" % (listening_on,))
try:
try:
while True:
@@ -102,10 +118,16 @@ def backdoor(conn_info, locals=None):
(such as backdoor_server).
"""
conn, addr = conn_info
- host, port = addr
- print("backdoor to %s:%s" % (host, port))
+ if conn.family == socket.AF_INET:
+ host, port = addr
+ print("backdoor to %s:%s" % (host, port))
+ elif conn.family == socket.AF_INET6:
+ host, port, _, _ = addr
+ print("backdoor to %s:%s" % (host, port))
+ else:
+ print('backdoor opened')
fl = conn.makefile("rw")
- console = SocketConsole(fl, (host, port), locals)
+ console = SocketConsole(fl, addr, locals)
hub = hubs.get_hub()
hub.schedule_call_global(0, console.switch)
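With the changes above, backdoor_server() handles not only TCP listeners but also IPv6 and Unix domain sockets, since SocketConsole now keeps the raw address instead of assuming a (host, port) pair. A hedged usage sketch based on that code (the socket path is hypothetical):

    import eventlet
    from eventlet import backdoor
    from eventlet.green import socket

    # Unix domain socket backdoor; connect with e.g. `socat - UNIX:/tmp/backdoor.sock`
    listener = socket.socket(socket.AF_UNIX)
    listener.bind('/tmp/backdoor.sock')   # hypothetical path
    listener.listen(5)
    eventlet.spawn(backdoor.backdoor_server, listener, locals={'answer': 42})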
diff --git a/python-eventlet/eventlet/green/OpenSSL/__init__.py b/python-eventlet/eventlet/green/OpenSSL/__init__.py
index 56bfb8a..26b60d9 100644
--- a/python-eventlet/eventlet/green/OpenSSL/__init__.py
+++ b/python-eventlet/eventlet/green/OpenSSL/__init__.py
@@ -1,5 +1,5 @@
-import rand
-import crypto
-import SSL
-import tsafe
-from version import __version__
+from . import rand
+from . import crypto
+from . import SSL
+from . import tsafe
+from .version import __version__
diff --git a/python-eventlet/eventlet/green/select.py b/python-eventlet/eventlet/green/select.py
index 53fb359..d1cba12 100644
--- a/python-eventlet/eventlet/green/select.py
+++ b/python-eventlet/eventlet/green/select.py
@@ -6,6 +6,9 @@ from eventlet.support import six
__patched__ = ['select']
+# FIXME: must also delete `poll`, but it breaks subprocess `communicate()`
+# https://github.com/eventlet/eventlet/issues/290
+__deleted__ = ['devpoll', 'epoll', 'kqueue', 'kevent']
def get_fileno(obj):
diff --git a/python-eventlet/eventlet/green/selectors.py b/python-eventlet/eventlet/green/selectors.py
index 26427ec..81fc862 100644
--- a/python-eventlet/eventlet/green/selectors.py
+++ b/python-eventlet/eventlet/green/selectors.py
@@ -3,9 +3,32 @@ import sys
from eventlet import patcher
from eventlet.green import select
+__patched__ = [
+ 'DefaultSelector',
+ 'SelectSelector',
+]
+
+# We only have green select so the options are:
+# * leave it be and have selectors that block
+# * try to pretend the "bad" selectors don't exist
+# * replace all with SelectSelector for the price of possibly different
+# performance characteristic and missing fileno() method (if someone
+# uses it it'll result in a crash, we may want to implement it in the future)
+#
+# This module used to follow the third approach but just removing the offending
+# selectors is less error prone and less confusing approach.
+__deleted__ = [
+ 'PollSelector',
+ 'EpollSelector',
+ 'DevpollSelector',
+ 'KqueueSelector',
+]
+
patcher.inject('selectors', globals(), ('select', select))
del patcher
if sys.platform != 'win32':
SelectSelector._select = staticmethod(select.select)
+
+DefaultSelector = SelectSelector
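The module above keeps only the selector that can actually be made green (SelectSelector) and deletes the rest, so DefaultSelector always resolves to a green select()-based implementation after monkey patching. A sketch of the observable effect (assumes Python 3.4+, a Linux host where PollSelector originally existed, and that eventlet.monkey_patch() has run):

    import eventlet
    eventlet.monkey_patch()  # patches select and, on Python 3.4+, selectors

    import selectors

    # PollSelector/EpollSelector/... are deleted rather than left blocking.
    assert not hasattr(selectors, 'PollSelector')
    # DefaultSelector now points at the green SelectSelector.
    sel = selectors.DefaultSelector()
    print(type(sel).__name__)   # expected: 'SelectSelector'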
diff --git a/python-eventlet/eventlet/green/ssl.py b/python-eventlet/eventlet/green/ssl.py
index ded6533..81eae3d 100644
--- a/python-eventlet/eventlet/green/ssl.py
+++ b/python-eventlet/eventlet/green/ssl.py
@@ -22,7 +22,9 @@ else:
has_ciphers = False
timeout_exc = orig_socket.timeout
-__patched__ = ['SSLSocket', 'wrap_socket', 'sslwrap_simple']
+__patched__ = [
+ 'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
+ 'create_default_context', '_create_default_https_context']
_original_sslsocket = __ssl.SSLSocket
@@ -357,11 +359,27 @@ if hasattr(__ssl, 'sslwrap_simple'):
if hasattr(__ssl, 'SSLContext'):
- @functools.wraps(__ssl.SSLContext.wrap_socket)
- def _green_sslcontext_wrap_socket(self, sock, *a, **kw):
- return GreenSSLSocket(sock, *a, _context=self, **kw)
-
- # FIXME:
- # * GreenSSLContext akin to GreenSSLSocket
- # * make ssl.create_default_context() use modified SSLContext from globals as usual
- __ssl.SSLContext.wrap_socket = _green_sslcontext_wrap_socket
+ _original_sslcontext = __ssl.SSLContext
+
+ class GreenSSLContext(_original_sslcontext):
+ __slots__ = ()
+
+ def wrap_socket(self, sock, *a, **kw):
+ return GreenSSLSocket(sock, *a, _context=self, **kw)
+
+ SSLContext = GreenSSLContext
+
+ if hasattr(__ssl, 'create_default_context'):
+ _original_create_default_context = __ssl.create_default_context
+
+ def green_create_default_context(*a, **kw):
+ # We can't just monkey-patch on the green version of `wrap_socket`
+ # on to SSLContext instances, but SSLContext.create_default_context
+ # does a bunch of work. Rather than re-implementing it all, just
+ # switch out the __class__ to get our `wrap_socket` implementation
+ context = _original_create_default_context(*a, **kw)
+ context.__class__ = GreenSSLContext
+ return context
+
+ create_default_context = green_create_default_context
+ _create_default_https_context = green_create_default_context
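The create_default_context() wrapper above swaps __class__ rather than re-implementing the stdlib setup, so the context keeps all of the default hardening but gains the green wrap_socket(). An illustrative sketch of the resulting behaviour (assumes eventlet.monkey_patch() has replaced the ssl module):

    import eventlet
    eventlet.monkey_patch()  # replaces ssl.SSLContext / ssl.create_default_context

    import ssl
    from eventlet.green import ssl as green_ssl

    ctx = ssl.create_default_context()
    # The stdlib did the usual setup, then the class was swapped afterwards,
    # so wrap_socket() now returns a GreenSSLSocket instead of blocking the hub.
    print(type(ctx).__name__)                    # expected: 'GreenSSLContext'
    assert isinstance(ctx, green_ssl.GreenSSLContext)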
diff --git a/python-eventlet/eventlet/green/subprocess.py b/python-eventlet/eventlet/green/subprocess.py
index 7ce38cf..1347404 100644
--- a/python-eventlet/eventlet/green/subprocess.py
+++ b/python-eventlet/eventlet/green/subprocess.py
@@ -17,6 +17,7 @@ if sys.version_info > (3, 4):
patcher.inject('subprocess', globals(), *to_patch)
subprocess_orig = __import__("subprocess")
+mswindows = sys.platform == "win32"
if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
@@ -46,7 +47,7 @@ class Popen(subprocess_orig.Popen):
# Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
# this __init__() override is to wrap the pipes for eventlet-friendly
# non-blocking I/O, don't even bother overriding it on Windows.
- if not subprocess_orig.mswindows:
+ if not mswindows:
def __init__(self, args, bufsize=0, *argss, **kwds):
self.args = args
# Forward the call to base-class constructor
@@ -55,8 +56,19 @@ class Popen(subprocess_orig.Popen):
# eventlet.processes.Process.run() method.
for attr in "stdin", "stdout", "stderr":
pipe = getattr(self, attr)
- if pipe is not None and not type(pipe) == greenio.GreenPipe:
- wrapped_pipe = greenio.GreenPipe(pipe, pipe.mode, bufsize)
+ if pipe is not None and type(pipe) != greenio.GreenPipe:
+ # https://github.com/eventlet/eventlet/issues/243
+ # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
+ mode = getattr(pipe, 'mode', '')
+ if not mode:
+ if pipe.readable():
+ mode += 'r'
+ if pipe.writable():
+ mode += 'w'
+ # ValueError: can't have unbuffered text I/O
+ if bufsize == 0:
+ bufsize = -1
+ wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
setattr(self, attr, wrapped_pipe)
__init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
@@ -82,7 +94,7 @@ class Popen(subprocess_orig.Popen):
raise
wait.__doc__ = subprocess_orig.Popen.wait.__doc__
- if not subprocess_orig.mswindows:
+ if not mswindows:
# don't want to rewrite the original _communicate() method, we
# just want a version that uses eventlet.green.select.select()
# instead of select.select().
diff --git a/python-eventlet/eventlet/greenio/base.py b/python-eventlet/eventlet/greenio/base.py
index 8da51ca..e1c4a45 100644
--- a/python-eventlet/eventlet/greenio/base.py
+++ b/python-eventlet/eventlet/greenio/base.py
@@ -1,13 +1,13 @@
import errno
import os
-from socket import socket as _original_socket
import socket
import sys
import time
import warnings
-from eventlet.support import get_errno, six
+import eventlet
from eventlet.hubs import trampoline, notify_opened, IOClosed
+from eventlet.support import get_errno, six
__all__ = [
'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
@@ -24,6 +24,8 @@ if sys.platform[:3] == "win":
if six.PY2:
_python2_fileobject = socket._fileobject
+_original_socket = eventlet.patcher.original('socket').socket
+
def socket_connect(descriptor, address):
"""
@@ -304,73 +306,80 @@ class GreenSocket(object):
"makefile instead", DeprecationWarning, stacklevel=2)
return self.makefile(*args, **kw)
- def recv(self, buflen, flags=0):
+ def _read_trampoline(self):
+ self._trampoline(
+ self.fd,
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout("timed out"))
+
+ def _recv_loop(self, recv_meth, *args):
fd = self.fd
if self.act_non_blocking:
- return fd.recv(buflen, flags)
+ return recv_meth(*args)
+
while True:
try:
- return fd.recv(buflen, flags)
+ # recv: bufsize=0?
+ # recv_into: buffer is empty?
+ # This is needed because behind the scenes we use sockets in
+ # nonblocking mode and builtin recv* methods. Attempting to read
+ # 0 bytes from a nonblocking socket using a builtin recv* method
+ # does not raise a timeout exception. Since we're simulating
+ # a blocking socket here we need to produce a timeout exception
+ # if needed, hence the call to trampoline.
+ if not args[0]:
+ self._read_trampoline()
+ return recv_meth(*args)
except socket.error as e:
if get_errno(e) in SOCKET_BLOCKING:
pass
elif get_errno(e) in SOCKET_CLOSED:
- return ''
+ return b''
else:
raise
+
try:
- self._trampoline(
- fd,
- read=True,
- timeout=self.gettimeout(),
- timeout_exc=socket.timeout("timed out"))
+ self._read_trampoline()
except IOClosed as e:
# Perhaps we should return '' instead?
raise EOFError()
- def recvfrom(self, *args):
- if not self.act_non_blocking:
- self._trampoline(self.fd, read=True, timeout=self.gettimeout(),
- timeout_exc=socket.timeout("timed out"))
- return self.fd.recvfrom(*args)
+ def recv(self, bufsize, flags=0):
+ return self._recv_loop(self.fd.recv, bufsize, flags)
- def recvfrom_into(self, *args):
- if not self.act_non_blocking:
- self._trampoline(self.fd, read=True, timeout=self.gettimeout(),
- timeout_exc=socket.timeout("timed out"))
- return self.fd.recvfrom_into(*args)
+ def recvfrom(self, bufsize, flags=0):
+ return self._recv_loop(self.fd.recvfrom, bufsize, flags)
- def recv_into(self, *args):
- if not self.act_non_blocking:
- self._trampoline(self.fd, read=True, timeout=self.gettimeout(),
- timeout_exc=socket.timeout("timed out"))
- return self.fd.recv_into(*args)
+ def recv_into(self, buffer, nbytes=0, flags=0):
+ return self._recv_loop(self.fd.recv_into, buffer, nbytes, flags)
- def send(self, data, flags=0):
- fd = self.fd
+ def recvfrom_into(self, buffer, nbytes=0, flags=0):
+ return self._recv_loop(self.fd.recvfrom_into, buffer, nbytes, flags)
+
+ def _send_loop(self, send_method, data, *args):
if self.act_non_blocking:
- return fd.send(data, flags)
+ return send_method(data, *args)
- # blocking socket behavior - sends all, blocks if the buffer is full
- total_sent = 0
- len_data = len(data)
while 1:
try:
- total_sent += fd.send(data[total_sent:], flags)
+ return send_method(data, *args)
except socket.error as e:
- if get_errno(e) not in SOCKET_BLOCKING:
+ eno = get_errno(e)
+ if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
raise
- if total_sent == len_data:
- break
-
try:
self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
except IOClosed:
raise socket.error(errno.ECONNRESET, 'Connection closed by another thread')
- return total_sent
+ def send(self, data, flags=0):
+ return self._send_loop(self.fd.send, data, flags)
+
+ def sendto(self, data, *args):
+ return self._send_loop(self.fd.sendto, data, *args)
def sendall(self, data, flags=0):
tail = self.send(data, flags)
@@ -378,10 +387,6 @@ class GreenSocket(object):
while tail < len_data:
tail += self.send(data[tail:], flags)
- def sendto(self, *args):
- self._trampoline(self.fd, write=True)
- return self.fd.sendto(*args)
-
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
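All receive and send paths now go through the same pattern: try the non-blocking operation, and on EWOULDBLOCK park the greenthread on the hub with trampoline() until the fd is ready. A simplified sketch of that loop, distilled from _recv_loop()/_send_loop() above (not the verbatim implementation):

    import errno
    import socket

    from eventlet.hubs import trampoline

    def green_io_retry(sock, op, args=(), timeout=None):
        # `op` is a non-blocking call on `sock`, e.g. sock.recv.
        while True:
            try:
                return op(*args)
            except socket.error as e:
                if e.args[0] not in (errno.EAGAIN, errno.EWOULDBLOCK):
                    raise
            # Nothing ready yet: yield to the hub until the fd becomes readable,
            # raising socket.timeout if the optional timeout expires first.
            trampoline(sock, read=True, timeout=timeout,
                       timeout_exc=socket.timeout("timed out"))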
diff --git a/python-eventlet/eventlet/greenio/py2.py b/python-eventlet/eventlet/greenio/py2.py
index a0e9efe..471391b 100644
--- a/python-eventlet/eventlet/greenio/py2.py
+++ b/python-eventlet/eventlet/greenio/py2.py
@@ -38,7 +38,7 @@ class GreenPipe(_fileobject):
self._name = f.name
f.close()
- super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode)
+ super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode, bufsize)
set_nonblocking(self)
self.softspace = 0
diff --git a/python-eventlet/eventlet/greenio/py3.py b/python-eventlet/eventlet/greenio/py3.py
index 338ac68..f2248e1 100644
--- a/python-eventlet/eventlet/greenio/py3.py
+++ b/python-eventlet/eventlet/greenio/py3.py
@@ -75,10 +75,26 @@ class GreenFileIO(_OriginalIOBase):
def fileno(self):
return self._fileno
- def read(self, buflen):
+ def read(self, size=-1):
+ if size == -1:
+ return self.readall()
+
+ while True:
+ try:
+ return _original_os.read(self._fileno, size)
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise IOError(*e.args)
+ self._trampoline(self, read=True)
+
+ def readall(self):
+ buf = []
while True:
try:
- return _original_os.read(self._fileno, buflen)
+ chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
+ if chunk == b'':
+ return b''.join(buf)
+ buf.append(chunk)
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise IOError(*e.args)
@@ -116,14 +132,19 @@ class GreenFileIO(_OriginalIOBase):
self._closed = True
def write(self, data):
- while True:
+ view = memoryview(data)
+ datalen = len(data)
+ offset = 0
+ while offset < datalen:
try:
- return _original_os.write(self._fileno, data)
+ written = _original_os.write(self._fileno, view[offset:])
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise IOError(*e.args)
- else:
- trampoline(self, write=True)
+ trampoline(self, write=True)
+ else:
+ offset += written
+ return offset
def close(self):
if not self._closed:
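GreenFileIO.write() above now mirrors Python 2 by writing the whole buffer, retrying partial os.write() results through a memoryview instead of returning early. A reduced sketch of the partial-write loop for a plain non-blocking fd (illustrative; the real code trampolines instead of spinning):

    import errno
    import os

    def write_all(fd, data):
        view = memoryview(data)
        offset = 0
        while offset < len(data):
            try:
                # os.write() on a non-blocking fd may accept only part of the buffer.
                offset += os.write(fd, view[offset:])
            except OSError as e:
                if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
                    raise
                # A green implementation would trampoline here until writable.
        return offset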
diff --git a/python-eventlet/eventlet/patcher.py b/python-eventlet/eventlet/patcher.py
index eb09f9a..3a9804e 100644
--- a/python-eventlet/eventlet/patcher.py
+++ b/python-eventlet/eventlet/patcher.py
@@ -251,27 +251,19 @@ def monkey_patch(**on):
on.setdefault(modname, default_on)
modules_to_patch = []
- if on['os'] and not already_patched.get('os'):
- modules_to_patch += _green_os_modules()
- already_patched['os'] = True
- if on['select'] and not already_patched.get('select'):
- modules_to_patch += _green_select_modules()
- already_patched['select'] = True
- if on['socket'] and not already_patched.get('socket'):
- modules_to_patch += _green_socket_modules()
- already_patched['socket'] = True
- if on['thread'] and not already_patched.get('thread'):
- modules_to_patch += _green_thread_modules()
- already_patched['thread'] = True
- if on['time'] and not already_patched.get('time'):
- modules_to_patch += _green_time_modules()
- already_patched['time'] = True
- if on.get('MySQLdb') and not already_patched.get('MySQLdb'):
- modules_to_patch += _green_MySQLdb()
- already_patched['MySQLdb'] = True
- if on.get('builtins') and not already_patched.get('builtins'):
- modules_to_patch += _green_builtins()
- already_patched['builtins'] = True
+ for name, modules_function in [
+ ('os', _green_os_modules),
+ ('select', _green_select_modules),
+ ('socket', _green_socket_modules),
+ ('thread', _green_thread_modules),
+ ('time', _green_time_modules),
+ ('MySQLdb', _green_MySQLdb),
+ ('builtins', _green_builtins),
+ ]:
+ if on[name] and not already_patched.get(name):
+ modules_to_patch += modules_function()
+ already_patched[name] = True
+
if on['psycopg'] and not already_patched.get('psycopg'):
try:
from eventlet.support import psycopg2_patcher
@@ -295,6 +287,10 @@ def monkey_patch(**on):
patched_attr = getattr(mod, attr_name, None)
if patched_attr is not None:
setattr(orig_mod, attr_name, patched_attr)
+ deleted = getattr(mod, '__deleted__', [])
+ for attr_name in deleted:
+ if hasattr(orig_mod, attr_name):
+ delattr(orig_mod, attr_name)
finally:
imp.release_lock()
@@ -331,7 +327,13 @@ def _green_os_modules():
def _green_select_modules():
from eventlet.green import select
- return [('select', select)]
+ modules = [('select', select)]
+
+ if sys.version_info >= (3, 4):
+ from eventlet.green import selectors
+ modules.append(('selectors', selectors))
+
+ return modules
def _green_socket_modules():
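monkey_patch() now honours a __deleted__ list alongside __patched__: attributes named there are removed from the original module so callers cannot accidentally reach a non-green poll/epoll. Roughly, the convention a green module follows and what the patcher does with it (condensed from the code above, not the exact implementation):

    # A green module declares, as eventlet.green.select / selectors do above:
    #   __patched__ = ['select']                      # copied over the original
    #   __deleted__ = ['devpoll', 'epoll', 'kqueue']  # removed from the original

    def apply_green_module(orig_mod, green_mod):
        for name in getattr(green_mod, '__patched__', []):
            value = getattr(green_mod, name, None)
            if value is not None:
                setattr(orig_mod, name, value)
        for name in getattr(green_mod, '__deleted__', []):
            if hasattr(orig_mod, name):
                delattr(orig_mod, name)   # e.g. select.epoll disappears after patching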
diff --git a/python-eventlet/eventlet/queue.py b/python-eventlet/eventlet/queue.py
index 5a82238..31248db 100644
--- a/python-eventlet/eventlet/queue.py
+++ b/python-eventlet/eventlet/queue.py
@@ -50,6 +50,7 @@ from eventlet.event import Event
from eventlet.greenthread import getcurrent
from eventlet.hubs import get_hub
from eventlet.support import six
+from eventlet.support.six.moves import queue as Stdlib_Queue
from eventlet.timeout import Timeout
@@ -145,9 +146,10 @@ class Waiter(object):
class LightQueue(object):
"""
This is a variant of Queue that behaves mostly like the standard
- :class:`Queue`. It differs by not supporting the
- :meth:`task_done ` or :meth:`join ` methods,
- and is a little faster for not having that overhead.
+ :class:`Stdlib_Queue`. It differs by not supporting the
+ :meth:`task_done ` or
+ :meth:`join ` methods, and is a little faster for
+ not having that overhead.
"""
def __init__(self, maxsize=None):
@@ -381,11 +383,11 @@ class Queue(LightQueue):
If *maxsize* is less than zero or ``None``, the queue size is infinite.
``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
- until the item is delivered. (This is unlike the standard :class:`Queue`,
- where 0 means infinite size).
+ until the item is delivered. (This is unlike the standard
+ :class:`Stdlib_Queue`, where 0 means infinite size).
- In all other respects, this Queue class resembled the standard library,
- :class:`Queue`.
+ In all other respects, this Queue class resembles the standard library,
+ :class:`Stdlib_Queue`.
'''
def __init__(self, maxsize=None):
diff --git a/python-eventlet/eventlet/tpool.py b/python-eventlet/eventlet/tpool.py
index 8d73814..618c377 100644
--- a/python-eventlet/eventlet/tpool.py
+++ b/python-eventlet/eventlet/tpool.py
@@ -13,11 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import atexit
import imp
import os
import sys
import traceback
+import eventlet
from eventlet import event, greenio, greenthread, patcher, timeout
from eventlet.support import six
@@ -39,7 +41,7 @@ if six.PY3:
Empty = Queue_module.Empty
Queue = Queue_module.Queue
-_bytetosend = ' '.encode()
+_bytetosend = b' '
_coro = None
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
_reqq = _rspq = None
@@ -54,6 +56,7 @@ def tpool_trampoline():
try:
_c = _rsock.recv(1)
assert _c
+ # FIXME: this is probably redundant since using sockets instead of pipe now
except ValueError:
break # will be raised when pipe is closed
while not _rspq.empty():
@@ -250,29 +253,33 @@ class Proxy(object):
def setup():
- global _rsock, _wsock, _threads, _coro, _setup_already, _rspq, _reqq
+ global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
if _setup_already:
return
else:
_setup_already = True
+ assert _nthreads >= 0, "Can't specify negative number of threads"
+ if _nthreads == 0:
+ import warnings
+ warnings.warn("Zero threads in tpool. All tpool.execute calls will\
+ execute in main thread. Check the value of the environment \
+ variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
+ _reqq = Queue(maxsize=-1)
+ _rspq = Queue(maxsize=-1)
+
+ # connected socket pair
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
sock.listen(1)
csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(sock.getsockname())
+ csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
_wsock, _addr = sock.accept()
+ _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
sock.close()
_rsock = greenio.GreenSocket(csock)
- _reqq = Queue(maxsize=-1)
- _rspq = Queue(maxsize=-1)
- assert _nthreads >= 0, "Can't specify negative number of threads"
- if _nthreads == 0:
- import warnings
- warnings.warn("Zero threads in tpool. All tpool.execute calls will\
- execute in main thread. Check the value of the environment \
- variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
for i in six.moves.range(_nthreads):
t = threading.Thread(target=tworker,
name="tpool_thread_%s" % i)
@@ -281,12 +288,20 @@ def setup():
_threads.append(t)
_coro = greenthread.spawn_n(tpool_trampoline)
+ # This yield fixes subtle error with GreenSocket.__del__
+ eventlet.sleep(0)
+# Avoid ResourceWarning unclosed socket on Python3.2+
+@atexit.register
def killall():
global _setup_already, _rspq, _rsock, _wsock
if not _setup_already:
return
+
+ # This yield fixes freeze in some scenarios
+ eventlet.sleep(0)
+
for thr in _threads:
_reqq.put(None)
for thr in _threads:
@@ -294,7 +309,7 @@ def killall():
del _threads[:]
# return any remaining results
- while not _rspq.empty():
+ while (_rspq is not None) and not _rspq.empty():
try:
(e, rv) = _rspq.get(block=False)
e.send(rv)
@@ -304,10 +319,12 @@ def killall():
if _coro is not None:
greenthread.kill(_coro)
- _rsock.close()
- _wsock.close()
- _rsock = None
- _wsock = None
+ if _rsock is not None:
+ _rsock.close()
+ _rsock = None
+ if _wsock is not None:
+ _wsock.close()
+ _wsock = None
_rspq = None
_setup_already = False
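killall() is now registered with atexit so the helper socket pair is closed on interpreter shutdown, and setup() warns when EVENTLET_THREADPOOL_SIZE is 0. Typical use is unchanged; a small hedged example of pushing a blocking call through the pool:

    import os
    # Pool size is read from the environment at import time (default 20).
    os.environ.setdefault('EVENTLET_THREADPOOL_SIZE', '10')

    from eventlet import tpool

    def blocking_read(path):
        with open(path, 'rb') as f:
            return f.read()

    # Runs blocking_read() in a worker OS thread; the calling greenthread
    # blocks, but the hub keeps running. killall() now also runs via atexit.
    data = tpool.execute(blocking_read, __file__)
    print(len(data))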
diff --git a/python-eventlet/eventlet/websocket.py b/python-eventlet/eventlet/websocket.py
index 3c93e90..9321956 100644
--- a/python-eventlet/eventlet/websocket.py
+++ b/python-eventlet/eventlet/websocket.py
@@ -560,7 +560,7 @@ class RFC6455WebSocket(WebSocket):
decoder = self.UTF8Decoder() if opcode == 1 else None
message = self.Message(opcode, decoder=decoder)
if not length:
- message.push('', final=finished)
+ message.push(b'', final=finished)
else:
while received < length:
d = self.socket.recv(length - received)
diff --git a/python-eventlet/eventlet/wsgi.py b/python-eventlet/eventlet/wsgi.py
index 7258277..6af2b99 100644
--- a/python-eventlet/eventlet/wsgi.py
+++ b/python-eventlet/eventlet/wsgi.py
@@ -1,4 +1,5 @@
import errno
+import functools
import os
import sys
import time
@@ -6,13 +7,12 @@ import traceback
import types
import warnings
-from eventlet.green import BaseHTTPServer
-from eventlet.green import socket
from eventlet import greenio
from eventlet import greenpool
from eventlet import support
+from eventlet.green import BaseHTTPServer
+from eventlet.green import socket
from eventlet.support import six
-
from eventlet.support.six.moves import urllib
@@ -50,6 +50,10 @@ BAD_SOCK = set((errno.EBADF, 10053))
BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET))
+class ChunkReadError(ValueError):
+ pass
+
+
# special flag return value for apps
class _AlreadyHandled(object):
@@ -109,19 +113,18 @@ class Input(object):
towrite.append(b'\r\n')
self.wfile.writelines(towrite)
+ self.wfile.flush()
# Reinitialize chunk_length (expect more data)
self.chunk_length = -1
def _do_read(self, reader, length=None):
- if self.wfile is not None and \
- not self.is_hundred_continue_response_sent:
+ if self.wfile is not None and not self.is_hundred_continue_response_sent:
# 100 Continue response
self.send_hundred_continue_response()
self.is_hundred_continue_response_sent = True
- if length is None and self.content_length is not None:
- length = self.content_length - self.position
- if length and length > self.content_length - self.position:
+ if (self.content_length is not None) and (
+ length is None or length > self.content_length - self.position):
length = self.content_length - self.position
if not length:
return b''
@@ -133,8 +136,7 @@ class Input(object):
return read
def _chunked_read(self, rfile, length=None, use_readline=False):
- if self.wfile is not None and \
- not self.is_hundred_continue_response_sent:
+ if self.wfile is not None and not self.is_hundred_continue_response_sent:
# 100 Continue response
self.send_hundred_continue_response()
self.is_hundred_continue_response_sent = True
@@ -176,7 +178,10 @@ class Input(object):
if use_readline and data[-1] == "\n":
break
else:
- self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16)
+ try:
+ self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16)
+ except ValueError as err:
+ raise ChunkReadError(err)
self.position = 0
if self.chunk_length == 0:
rfile.readline()
@@ -216,6 +221,10 @@ class Input(object):
for key, value in headers]
self.hundred_continue_headers = headers
+ def discard(self, buffer_size=16 << 10):
+ while self.read(buffer_size):
+ pass
+
class HeaderLineTooLong(Exception):
pass
@@ -238,6 +247,9 @@ class LoggerFileWrapper(object):
self.log = log
self._debug = debug
+ def error(self, msg, *args, **kwargs):
+ self.write(msg, *args)
+
def info(self, msg, *args, **kwargs):
self.write(msg, *args)
@@ -276,9 +288,23 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
minimum_chunk_size = MINIMUM_CHUNK_SIZE
capitalize_response_headers = True
+ # https://github.com/eventlet/eventlet/issues/295
+ # Stdlib default is 0 (unbuffered), but then `wfile.writelines()` looses data
+ # so before going back to unbuffered, remove any usage of `writelines`.
+ wbufsize = 16 << 10
+
def setup(self):
# overriding SocketServer.setup to correctly handle SSL.Connection objects
conn = self.connection = self.request
+
+ # TCP_QUICKACK is a better alternative to disabling Nagle's algorithm
+ # https://news.ycombinator.com/item?id=10607422
+ if getattr(socket, 'TCP_QUICKACK', None):
+ try:
+ conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True)
+ except socket.error:
+ pass
+
try:
self.rfile = conn.makefile('rb', self.rbufsize)
self.wfile = conn.makefile('wb', self.wbufsize)
@@ -374,7 +400,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
length = [0]
status_code = [200]
- def write(data, _writelines=wfile.writelines):
+ def write(data):
towrite = []
if not headers_set:
raise AssertionError("write() before start_response()")
@@ -423,7 +449,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n")
else:
towrite.append(data)
- _writelines(towrite)
+ wfile.writelines(towrite)
+ wfile.flush()
length[0] = length[0] + sum(map(len, towrite))
def start_response(status, response_headers, exc_info=None):
@@ -468,6 +495,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
minimum_write_chunk_size = int(self.environ.get(
'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
for data in result:
+ if len(data) == 0:
+ continue
if isinstance(data, six.text_type):
data = data.encode('ascii')
@@ -496,16 +525,20 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
finally:
if hasattr(result, 'close'):
result.close()
- if (self.environ['eventlet.input'].chunked_input or
- self.environ['eventlet.input'].position
- < (self.environ['eventlet.input'].content_length or 0)):
+ request_input = self.environ['eventlet.input']
+ if (request_input.chunked_input or
+ request_input.position < (request_input.content_length or 0)):
# Read and discard body if there was no pending 100-continue
- if not self.environ['eventlet.input'].wfile:
- # NOTE: MINIMUM_CHUNK_SIZE is used here for purpose different than chunking.
- # We use it only cause it's at hand and has reasonable value in terms of
- # emptying the buffer.
- while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
- pass
+ if not request_input.wfile and self.close_connection == 0:
+ try:
+ request_input.discard()
+ except ChunkReadError as e:
+ self.close_connection = 1
+ self.server.log.error((
+ 'chunked encoding error while discarding request body.'
+ + ' ip={0} request="{1}" error="{2}"').format(
+ self.get_client_ip(), self.requestline, e,
+ ))
finish = time.time()
for hook, args, kwargs in self.environ['eventlet.posthooks']:
@@ -711,6 +744,24 @@ except ImportError:
ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET))
+def socket_repr(sock):
+ scheme = 'http'
+ if hasattr(sock, 'do_handshake'):
+ scheme = 'https'
+
+ name = sock.getsockname()
+ if sock.family == socket.AF_INET:
+ hier_part = '//{0}:{1}'.format(*name)
+ elif sock.family == socket.AF_INET6:
+ hier_part = '//[{0}]:{1}'.format(*name[:2])
+ elif sock.family == socket.AF_UNIX:
+ hier_part = name
+ else:
+ hier_part = repr(name)
+
+ return scheme + ':' + hier_part
+
+
def server(sock, site,
log=None,
environ=None,
@@ -752,6 +803,7 @@ def server(sock, site,
If not specified, sys.stderr is used.
:param environ: Additional parameters that go into the environ dictionary of every request.
:param max_size: Maximum number of client connections opened at any time by this server.
+ Default is 1024.
:param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0.
This can help with applications or clients that don't behave properly using HTTP 1.1.
:param protocol: Protocol class. Deprecated.
@@ -805,19 +857,8 @@ def server(sock, site,
else:
pool = greenpool.GreenPool(max_size)
try:
- host, port = sock.getsockname()[:2]
- port = ':%s' % (port, )
- if hasattr(sock, 'do_handshake'):
- scheme = 'https'
- if port == ':443':
- port = ''
- else:
- scheme = 'http'
- if port == ':80':
- port = ''
-
- serv.log.info("(%s) wsgi starting up on %s://%s%s/" % (
- serv.pid, scheme, host, port))
+ serv.log.info("(%s) wsgi starting up on %s" % (
+ serv.pid, socket_repr(sock)))
while is_accepting:
try:
client_socket = sock.accept()
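The new socket_repr() helper is what produces the "wsgi starting up on ..." log line for TCP, IPv6 and Unix listeners alike. An illustrative sketch of the resulting log output (port and paths are example values):

    import eventlet
    from eventlet import wsgi

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello\n']

    sock = eventlet.listen(('127.0.0.1', 8090))
    # Logs something like: "(12345) wsgi starting up on http://127.0.0.1:8090"
    # An AF_UNIX listener would instead log "http:/tmp/app.sock", and an
    # SSL-wrapped socket switches the scheme to https.
    wsgi.server(sock, app)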
diff --git a/python-eventlet/tests/__init__.py b/python-eventlet/tests/__init__.py
index 26c0c2e..0c37cdd 100644
--- a/python-eventlet/tests/__init__.py
+++ b/python-eventlet/tests/__init__.py
@@ -209,6 +209,13 @@ def check_idle_cpu_usage(duration, allowed_part):
r2 = resource.getrusage(resource.RUSAGE_SELF)
utime = r2.ru_utime - r1.ru_utime
stime = r2.ru_stime - r1.ru_stime
+
+ # This check is reliably unreliable on Travis, presumably because of CPU
+ # resources being quite restricted by the build environment. The workaround
+ # is to apply an arbitrary factor that should be enough to make it work nicely.
+ if os.environ.get('TRAVIS') == 'true':
+ allowed_part *= 1.2
+
assert utime + stime < duration * allowed_part, \
"CPU usage over limit: user %.0f%% sys %.0f%% allowed %.0f%%" % (
utime / duration * 100, stime / duration * 100,
@@ -292,15 +299,22 @@ def get_database_auth():
return retval
-def run_python(path):
- if not path.endswith('.py'):
- path += '.py'
- path = os.path.abspath(path)
- src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+def run_python(path, env=None, args=None):
+ new_argv = [sys.executable]
new_env = os.environ.copy()
- new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [src_dir])
+ if path:
+ if not path.endswith('.py'):
+ path += '.py'
+ path = os.path.abspath(path)
+ new_argv.append(path)
+ src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [src_dir])
+ if env:
+ new_env.update(env)
+ if args:
+ new_argv.extend(args)
p = subprocess.Popen(
- [sys.executable, path],
+ new_argv,
env=new_env,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
@@ -310,15 +324,18 @@ def run_python(path):
return output
-def run_isolated(path, prefix='tests/isolated/'):
- output = run_python(prefix + path).rstrip()
+def run_isolated(path, prefix='tests/isolated/', env=None, args=None):
+ output = run_python(prefix + path, env=env, args=args).rstrip()
if output.startswith(b'skip'):
parts = output.split(b':', 1)
skip_args = []
if len(parts) > 1:
skip_args.append(parts[1])
raise SkipTest(*skip_args)
- assert output == b'pass', output
+ ok = output == b'pass'
+ if not ok:
+ sys.stderr.write('Isolated test {0} output:\n---\n{1}\n---\n'.format(path, output.decode()))
+ assert ok, 'Expected single line "pass" in stdout'
certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
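
For reference, run_python() and run_isolated() above now accept an optional environment and extra argv, which is what lets the environment-variable tests later in this patch drop their tempfile machinery. A rough usage sketch, assuming it is run from the source tree so the tests package is importable:

    import tests

    # Run inline code in a child interpreter with a custom environment.
    output = tests.run_python(
        path=None,
        env={'EVENTLET_HUB': 'selects'},
        args=['-c', 'from eventlet import hubs; print(hubs.get_hub())'],
    )

    # Run a script from tests/isolated/; it must print exactly "pass"
    # (or "skip:<reason>") for run_isolated() to succeed.
    tests.run_isolated('env_tpool_zero.py',
                       env={'EVENTLET_THREADPOOL_SIZE': '0'})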
diff --git a/python-eventlet/tests/backdoor_test.py b/python-eventlet/tests/backdoor_test.py
index 6facffe..f932eb0 100644
--- a/python-eventlet/tests/backdoor_test.py
+++ b/python-eventlet/tests/backdoor_test.py
@@ -1,4 +1,8 @@
+import os
+import os.path
+
import eventlet
+
from eventlet import backdoor
from eventlet.green import socket
@@ -13,6 +17,9 @@ class BackdoorTest(LimitedTestCase):
serv = eventlet.spawn(backdoor.backdoor_server, listener)
client = socket.socket()
client.connect(('localhost', listener.getsockname()[1]))
+ self._run_test_on_client_and_server(client, serv)
+
+ def _run_test_on_client_and_server(self, client, server_thread):
f = client.makefile('rw')
assert 'Python' in f.readline()
f.readline() # build info
@@ -25,10 +32,31 @@ class BackdoorTest(LimitedTestCase):
self.assertEqual('>>> ', f.read(4))
f.close()
client.close()
- serv.kill()
+ server_thread.kill()
# wait for the console to discover that it's dead
eventlet.sleep(0.1)
+ def test_server_on_ipv6_socket(self):
+ listener = socket.socket(socket.AF_INET6)
+ listener.bind(('::1', 0))
+ listener.listen(5)
+ serv = eventlet.spawn(backdoor.backdoor_server, listener)
+ client = socket.socket(socket.AF_INET6)
+ client.connect(listener.getsockname())
+ self._run_test_on_client_and_server(client, serv)
+
+ def test_server_on_unix_socket(self):
+ SOCKET_PATH = '/tmp/eventlet_backdoor_test.socket'
+ if os.path.exists(SOCKET_PATH):
+ os.unlink(SOCKET_PATH)
+ listener = socket.socket(socket.AF_UNIX)
+ listener.bind(SOCKET_PATH)
+ listener.listen(5)
+ serv = eventlet.spawn(backdoor.backdoor_server, listener)
+ client = socket.socket(socket.AF_UNIX)
+ client.connect(SOCKET_PATH)
+ self._run_test_on_client_and_server(client, serv)
+
if __name__ == '__main__':
main()
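
The two new cases above exercise the backdoor server over IPv6 and UNIX sockets. A minimal application-side sketch of the UNIX-socket variant (the socket path is hypothetical):

    import eventlet
    from eventlet import backdoor
    from eventlet.green import socket

    listener = socket.socket(socket.AF_UNIX)
    listener.bind('/tmp/myapp_backdoor.sock')  # hypothetical path
    listener.listen(5)
    eventlet.spawn(backdoor.backdoor_server, listener)
    # Connect with any UNIX-socket client, e.g.:
    #   socat - UNIX-CONNECT:/tmp/myapp_backdoor.sock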
diff --git a/python-eventlet/tests/db_pool_test.py b/python-eventlet/tests/db_pool_test.py
index 9fc9ebc..da72be4 100644
--- a/python-eventlet/tests/db_pool_test.py
+++ b/python-eventlet/tests/db_pool_test.py
@@ -7,7 +7,7 @@ import os
import traceback
from unittest import TestCase, main
-from tests import mock, skipped, skip_unless, skip_with_pyevent, get_database_auth
+from tests import mock, skip_unless, skip_with_pyevent, get_database_auth
from eventlet import event
from eventlet import db_pool
from eventlet.support import six
@@ -116,14 +116,6 @@ class DBConnectionPool(DBTester):
assert self.pool.free() == 1
self.assertRaises(AttributeError, self.connection.cursor)
- @skipped
- def test_deletion_does_a_put(self):
- # doing a put on del causes some issues if __del__ is called in the
- # main coroutine, so, not doing that for now
- assert self.pool.free() == 0
- self.connection = None
- assert self.pool.free() == 1
-
def test_put_doesnt_double_wrap(self):
self.pool.put(self.connection)
conn = self.pool.get()
@@ -213,45 +205,6 @@ class DBConnectionPool(DBTester):
conn.commit()
self.pool.put(conn)
- @skipped
- def test_two_simultaneous_connections(self):
- # timing-sensitive test, disabled until we come up with a better
- # way to do this
- self.pool = self.create_pool(max_size=2)
- conn = self.pool.get()
- self.set_up_dummy_table(conn)
- self.fill_up_table(conn)
- curs = conn.cursor()
- conn2 = self.pool.get()
- self.set_up_dummy_table(conn2)
- self.fill_up_table(conn2)
- curs2 = conn2.cursor()
- results = []
- LONG_QUERY = "select * from test_table"
- SHORT_QUERY = "select * from test_table where row_id <= 20"
-
- evt = event.Event()
-
- def long_running_query():
- self.assert_cursor_works(curs)
- curs.execute(LONG_QUERY)
- results.append(1)
- evt.send()
- evt2 = event.Event()
-
- def short_running_query():
- self.assert_cursor_works(curs2)
- curs2.execute(SHORT_QUERY)
- results.append(2)
- evt2.send()
-
- eventlet.spawn(long_running_query)
- eventlet.spawn(short_running_query)
- evt.wait()
- evt2.wait()
- results.sort()
- self.assertEqual([1, 2], results)
-
def test_clear(self):
self.pool = self.create_pool()
self.pool.put(self.connection)
@@ -318,80 +271,6 @@ class DBConnectionPool(DBTester):
self.connection.close()
self.assertEqual(len(self.pool.free_items), 0)
- @skipped
- def test_max_idle(self):
- # This test is timing-sensitive. Rename the function without
- # the "dont" to run it, but beware that it could fail or take
- # a while.
-
- self.pool = self.create_pool(max_size=2, max_idle=0.02)
- self.connection = self.pool.get()
- self.connection.close()
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0.01) # not long enough to trigger the idle timeout
- self.assertEqual(len(self.pool.free_items), 1)
- self.connection = self.pool.get()
- self.connection.close()
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0.01) # idle timeout should have fired but done nothing
- self.assertEqual(len(self.pool.free_items), 1)
- self.connection = self.pool.get()
- self.connection.close()
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0.03) # long enough to trigger idle timeout for real
- self.assertEqual(len(self.pool.free_items), 0)
-
- @skipped
- def test_max_idle_many(self):
- # This test is timing-sensitive. Rename the function without
- # the "dont" to run it, but beware that it could fail or take
- # a while.
-
- self.pool = self.create_pool(max_size=2, max_idle=0.02)
- self.connection, conn2 = self.pool.get(), self.pool.get()
- self.connection.close()
- eventlet.sleep(0.01)
- self.assertEqual(len(self.pool.free_items), 1)
- conn2.close()
- self.assertEqual(len(self.pool.free_items), 2)
- eventlet.sleep(0.02) # trigger cleanup of conn1 but not conn2
- self.assertEqual(len(self.pool.free_items), 1)
-
- @skipped
- def test_max_age(self):
- # This test is timing-sensitive. Rename the function without
- # the "dont" to run it, but beware that it could fail or take
- # a while.
-
- self.pool = self.create_pool(max_size=2, max_age=0.05)
- self.connection = self.pool.get()
- self.connection.close()
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0.01) # not long enough to trigger the age timeout
- self.assertEqual(len(self.pool.free_items), 1)
- self.connection = self.pool.get()
- self.connection.close()
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0.05) # long enough to trigger age timeout
- self.assertEqual(len(self.pool.free_items), 0)
-
- @skipped
- def test_max_age_many(self):
- # This test is timing-sensitive. Rename the function without
- # the "dont" to run it, but beware that it could fail or take
- # a while.
-
- self.pool = self.create_pool(max_size=2, max_age=0.15)
- self.connection, conn2 = self.pool.get(), self.pool.get()
- self.connection.close()
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0) # not long enough to trigger the age timeout
- self.assertEqual(len(self.pool.free_items), 1)
- eventlet.sleep(0.2) # long enough to trigger age timeout
- self.assertEqual(len(self.pool.free_items), 0)
- conn2.close() # should not be added to the free items
- self.assertEqual(len(self.pool.free_items), 0)
-
def test_waiters_get_woken(self):
# verify that when there's someone waiting on an empty pool
# and someone puts an immediately-closed connection back in
@@ -421,29 +300,6 @@ class DBConnectionPool(DBTester):
self.assertEqual(self.pool.waiting(), 0)
self.pool.put(conn)
- @skipped
- def test_0_straight_benchmark(self):
- """ Benchmark; don't run unless you want to wait a while."""
- import time
- iterations = 20000
- c = self.connection.cursor()
- self.connection.commit()
-
- def bench(c):
- for i in six.moves.range(iterations):
- c.execute('select 1')
-
- bench(c) # warm-up
- results = []
- for i in range(3):
- start = time.time()
- bench(c)
- end = time.time()
- results.append(end - start)
-
- print("\n%u iterations took an average of %f seconds, (%s) in %s\n" % (
- iterations, sum(results) / len(results), results, type(self)))
-
def test_raising_create(self):
# if the create() method raises an exception the pool should
# not lose any connections
diff --git a/python-eventlet/tests/env_test.py b/python-eventlet/tests/env_test.py
index f8931c1..fb7a58b 100644
--- a/python-eventlet/tests/env_test.py
+++ b/python-eventlet/tests/env_test.py
@@ -1,114 +1,46 @@
-import os
-from eventlet.support import six
-from tests.patcher_test import ProcessBase
-from tests import skip_with_pyevent
+import tests
-class Socket(ProcessBase):
- def test_patched_thread(self):
- new_mod = """from eventlet.green import socket
-socket.gethostbyname('localhost')
-socket.getaddrinfo('localhost', 80)
-"""
- os.environ['EVENTLET_TPOOL_DNS'] = 'yes'
- try:
- self.write_to_tempfile("newmod", new_mod)
- output, lines = self.launch_subprocess('newmod.py')
- self.assertEqual(len(lines), 1, lines)
- finally:
- del os.environ['EVENTLET_TPOOL_DNS']
-
+def test_hub_selects():
+ code = 'from eventlet import hubs\nprint(hubs.get_hub())'
+ output = tests.run_python(
+ path=None,
+ env={'EVENTLET_HUB': 'selects'},
+ args=['-c', code],
+ )
+ assert output.count(b'\n') == 1
+ assert b'eventlet.hubs.selects.Hub' in output
-class Tpool(ProcessBase):
- @skip_with_pyevent
- def test_tpool_size(self):
- expected = "40"
- normal = "20"
- new_mod = """from eventlet import tpool
-import eventlet
-import time
-current = [0]
-highwater = [0]
-def count():
- current[0] += 1
- time.sleep(0.1)
- if current[0] > highwater[0]:
- highwater[0] = current[0]
- current[0] -= 1
-expected = %s
-normal = %s
-p = eventlet.GreenPool()
-for i in range(expected*2):
- p.spawn(tpool.execute, count)
-p.waitall()
-assert highwater[0] > 20, "Highwater %%s <= %%s" %% (highwater[0], normal)
-"""
- os.environ['EVENTLET_THREADPOOL_SIZE'] = expected
- try:
- self.write_to_tempfile("newmod", new_mod % (expected, normal))
- output, lines = self.launch_subprocess('newmod.py')
- self.assertEqual(len(lines), 1, lines)
- finally:
- del os.environ['EVENTLET_THREADPOOL_SIZE']
- def test_tpool_negative(self):
- new_mod = """from eventlet import tpool
-import eventlet
-import time
-def do():
- print("should not get here")
-try:
- tpool.execute(do)
-except AssertionError:
- print("success")
-"""
- os.environ['EVENTLET_THREADPOOL_SIZE'] = "-1"
- try:
- self.write_to_tempfile("newmod", new_mod)
- output, lines = self.launch_subprocess('newmod.py')
- self.assertEqual(len(lines), 2, lines)
- self.assertEqual(lines[0], "success", output)
- finally:
- del os.environ['EVENTLET_THREADPOOL_SIZE']
+def test_tpool_dns():
+ code = '''\
+from eventlet.green import socket
+socket.gethostbyname('localhost')
+socket.getaddrinfo('localhost', 80)
+print('pass')
+'''
+ output = tests.run_python(
+ path=None,
+ env={'EVENTLET_TPOOL_DNS': 'yes'},
+ args=['-c', code],
+ )
+ assert output.rstrip() == b'pass'
- def test_tpool_zero(self):
- new_mod = """from eventlet import tpool
-import eventlet
-import time
-def do():
- print("ran it")
-tpool.execute(do)
-"""
- os.environ['EVENTLET_THREADPOOL_SIZE'] = "0"
- try:
- self.write_to_tempfile("newmod", new_mod)
- output, lines = self.launch_subprocess('newmod.py')
- self.assertEqual(len(lines), 4, lines)
- self.assertEqual(lines[-2], 'ran it', lines)
- assert 'Warning' in lines[1] or 'Warning' in lines[0], lines
- finally:
- del os.environ['EVENTLET_THREADPOOL_SIZE']
+@tests.skip_with_pyevent
+def test_tpool_size():
+ expected = '40'
+ normal = '20'
+ tests.run_isolated(
+ path='env_tpool_size.py',
+ env={'EVENTLET_THREADPOOL_SIZE': expected},
+ args=[expected, normal],
+ )
-class Hub(ProcessBase):
- def setUp(self):
- super(Hub, self).setUp()
- self.old_environ = os.environ.get('EVENTLET_HUB')
- os.environ['EVENTLET_HUB'] = 'selects'
+def test_tpool_negative():
+ tests.run_isolated('env_tpool_negative.py', env={'EVENTLET_THREADPOOL_SIZE': '-1'})
- def tearDown(self):
- if self.old_environ:
- os.environ['EVENTLET_HUB'] = self.old_environ
- else:
- del os.environ['EVENTLET_HUB']
- super(Hub, self).tearDown()
- def test_eventlet_hub(self):
- new_mod = """from eventlet import hubs
-print(hubs.get_hub())
-"""
- self.write_to_tempfile("newmod", new_mod)
- output, lines = self.launch_subprocess('newmod.py')
- self.assertEqual(len(lines), 2, "\n".join(lines))
- assert "selects" in lines[0]
+def test_tpool_zero():
+ tests.run_isolated('env_tpool_zero.py', env={'EVENTLET_THREADPOOL_SIZE': '0'})
diff --git a/python-eventlet/tests/greenio_test.py b/python-eventlet/tests/greenio_test.py
index 8a94b7b..99119b3 100644
--- a/python-eventlet/tests/greenio_test.py
+++ b/python-eventlet/tests/greenio_test.py
@@ -3,6 +3,7 @@ import errno
import eventlet
import fcntl
import gc
+from io import DEFAULT_BUFFER_SIZE
import os
import shutil
import socket as _orig_sock
@@ -11,6 +12,7 @@ import tempfile
from nose.tools import eq_
+import eventlet
from eventlet import event, greenio, debug
from eventlet.hubs import get_hub
from eventlet.green import select, socket, time, ssl
@@ -18,10 +20,6 @@ from eventlet.support import capture_stderr, get_errno, six
import tests
-if six.PY3:
- buffer = memoryview
-
-
def bufsized(sock, size=1):
""" Resize both send and receive buffers on a socket.
Useful for testing trampoline. Returns the socket.
@@ -34,6 +32,15 @@ def bufsized(sock, size=1):
return sock
+def expect_socket_timeout(function, *args):
+ try:
+ function(*args)
+ raise AssertionError("socket.timeout not raised")
+ except socket.timeout as e:
+ assert hasattr(e, 'args')
+ eq_(e.args[0], 'timed out')
+
+
def min_buf_size():
"""Return the minimum buffer size that the platform supports."""
test_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -71,12 +78,9 @@ class TestGreenSocket(tests.LimitedTestCase):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
gs = greenio.GreenSocket(s)
+
try:
- gs.connect(('192.0.2.1', 80))
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ expect_socket_timeout(gs.connect, ('192.0.2.1', 80))
except socket.error as e:
# unreachable is also a valid outcome
if not get_errno(e) in (errno.EHOSTUNREACH, errno.ENETUNREACH):
@@ -89,12 +93,7 @@ class TestGreenSocket(tests.LimitedTestCase):
s.settimeout(0.1)
gs = greenio.GreenSocket(s)
- try:
- gs.accept()
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ expect_socket_timeout(gs.accept)
def test_connect_ex_timeout(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -125,12 +124,8 @@ class TestGreenSocket(tests.LimitedTestCase):
client.connect(addr)
- try:
- client.recv(8192)
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ expect_socket_timeout(client.recv, 0)
+ expect_socket_timeout(client.recv, 8192)
evt.send()
gt.wait()
@@ -141,12 +136,8 @@ class TestGreenSocket(tests.LimitedTestCase):
gs.settimeout(.1)
gs.bind(('', 0))
- try:
- gs.recvfrom(8192)
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ expect_socket_timeout(gs.recvfrom, 0)
+ expect_socket_timeout(gs.recvfrom, 8192)
def test_recvfrom_into_timeout(self):
buf = array.array('B')
@@ -156,12 +147,7 @@ class TestGreenSocket(tests.LimitedTestCase):
gs.settimeout(.1)
gs.bind(('', 0))
- try:
- gs.recvfrom_into(buf)
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ expect_socket_timeout(gs.recvfrom_into, buf)
def test_recv_into_timeout(self):
buf = array.array('B')
@@ -186,12 +172,7 @@ class TestGreenSocket(tests.LimitedTestCase):
client.connect(addr)
- try:
- client.recv_into(buf)
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ expect_socket_timeout(client.recv_into, buf)
evt.send()
gt.wait()
@@ -214,19 +195,17 @@ class TestGreenSocket(tests.LimitedTestCase):
client = bufsized(greenio.GreenSocket(socket.socket()))
client.connect(addr)
- try:
- client.settimeout(0.00001)
- msg = b"A" * 100000 # large enough number to overwhelm most buffers
- total_sent = 0
- # want to exceed the size of the OS buffer so it'll block in a
- # single send
+ client.settimeout(0.00001)
+ msg = b"A" * 100000 # large enough number to overwhelm most buffers
+
+ # want to exceed the size of the OS buffer so it'll block in a
+ # single send
+ def send():
for x in range(10):
- total_sent += client.send(msg)
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ client.send(msg)
+
+ expect_socket_timeout(send)
evt.send()
gt.wait()
@@ -251,15 +230,9 @@ class TestGreenSocket(tests.LimitedTestCase):
client.settimeout(0.1)
client.connect(addr)
- try:
- msg = b"A" * (8 << 20)
-
- # want to exceed the size of the OS buffer so it'll block
- client.sendall(msg)
- self.fail("socket.timeout not raised")
- except socket.timeout as e:
- assert hasattr(e, 'args')
- self.assertEqual(e.args[0], 'timed out')
+ # want to exceed the size of the OS buffer so it'll block
+ msg = b"A" * (8 << 20)
+ expect_socket_timeout(client.sendall, msg)
evt.send()
gt.wait()
@@ -509,6 +482,9 @@ class TestGreenSocket(tests.LimitedTestCase):
while True:
try:
sock.sendall(b'hello world')
+ # Arbitrary delay to avoid using all available CPU; it keeps the test
+ # running quickly and reliably in under a second.
+ time.sleep(0.001)
except socket.error as e:
if get_errno(e) == errno.EPIPE:
return
@@ -524,6 +500,9 @@ class TestGreenSocket(tests.LimitedTestCase):
while True:
data = client.recv(1024)
assert data
+ # Arbitrary delay to avoid using all available CPU; it keeps the test
+ # running quickly and reliably in under a second.
+ time.sleep(0.001)
except socket.error as e:
# we get an EBADF because client is closed in the same process
# (but a different greenthread)
@@ -615,6 +594,21 @@ class TestGreenSocket(tests.LimitedTestCase):
# should not raise
greenio.shutdown_safe(sock)
+ def test_datagram_socket_operations_work(self):
+ receiver = greenio.GreenSocket(socket.AF_INET, socket.SOCK_DGRAM)
+ receiver.bind(('127.0.0.1', 0))
+ address = receiver.getsockname()
+
+ sender = greenio.GreenSocket(socket.AF_INET, socket.SOCK_DGRAM)
+
+ # Two ways sendto can be called
+ sender.sendto(b'first', address)
+ sender.sendto(b'second', 0, address)
+
+ sender_address = ('127.0.0.1', sender.getsockname()[1])
+ eq_(receiver.recvfrom(1024), (b'first', sender_address))
+ eq_(receiver.recvfrom(1024), (b'second', sender_address))
+
def test_get_fileno_of_a_socket_works():
class DummySocket(object):
@@ -711,6 +705,36 @@ class TestGreenPipe(tests.LimitedTestCase):
gt.wait()
+ def test_pipe_read_until_end(self):
+ # similar to test_pipe_read above but reading until eof
+ r, w = os.pipe()
+
+ r = greenio.GreenPipe(r, 'rb')
+ w = greenio.GreenPipe(w, 'wb')
+
+ w.write(b'c' * DEFAULT_BUFFER_SIZE * 2)
+ w.close()
+
+ buf = r.read() # no chunk size specified; read until end
+ self.assertEqual(len(buf), 2 * DEFAULT_BUFFER_SIZE)
+ self.assertEqual(buf[:3], b'ccc')
+
+ def test_pipe_read_unbuffered(self):
+ # Ensure that setting the buffer size works properly on GreenPipes,
+ # it used to be ignored on Python 2 and the test would hang on r.readline()
+ # below.
+ r, w = os.pipe()
+
+ r = greenio.GreenPipe(r, 'rb', 0)
+ w = greenio.GreenPipe(w, 'wb', 0)
+
+ w.write(b'line\n')
+
+ line = r.readline()
+ self.assertEqual(line, b'line\n')
+ r.close()
+ w.close()
+
def test_pipe_writes_large_messages(self):
r, w = os.pipe()
@@ -769,7 +793,7 @@ class TestGreenIoLong(tests.LimitedTestCase):
TEST_TIMEOUT = 10 # the test here might take a while depending on the OS
@tests.skip_with_pyevent
- def test_multiple_readers(self, clibufsize=False):
+ def test_multiple_readers(self):
debug.hub_prevent_multiple_readers(False)
recvsize = 2 * min_buf_size()
sendsize = 10 * recvsize
@@ -809,11 +833,15 @@ class TestGreenIoLong(tests.LimitedTestCase):
server_coro = eventlet.spawn(server)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', listener.getsockname()[1]))
- if clibufsize:
- bufsized(client, size=sendsize)
- else:
- bufsized(client)
- client.sendall(b'*' * sendsize)
+ bufsized(client, size=sendsize)
+
+ # Split the payload into multiple chunks and sleep briefly on every
+ # iteration so that both readers get a chance to queue up and recv
+ # some data each time we actually send it.
+ for i in range(20):
+ eventlet.sleep(0.001)
+ client.sendall(b'*' * (sendsize // 20))
+
client.close()
server_coro.wait()
listener.close()
@@ -821,110 +849,6 @@ class TestGreenIoLong(tests.LimitedTestCase):
assert len(results2) > 0
debug.hub_prevent_multiple_readers()
- @tests.skipped # by rdw because it fails but it's not clear how to make it pass
- @tests.skip_with_pyevent
- def test_multiple_readers2(self):
- self.test_multiple_readers(clibufsize=True)
-
-
-class TestGreenIoStarvation(tests.LimitedTestCase):
- # fixme: this doesn't succeed, because of eventlet's predetermined
- # ordering. two processes, one with server, one with client eventlets
- # might be more reliable?
-
- TEST_TIMEOUT = 300 # the test here might take a while depending on the OS
-
- @tests.skipped # by rdw, because it fails but it's not clear how to make it pass
- @tests.skip_with_pyevent
- def test_server_starvation(self, sendloops=15):
- recvsize = 2 * min_buf_size()
- sendsize = 10000 * recvsize
-
- results = [[] for i in range(5)]
-
- listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- listener.bind(('127.0.0.1', 0))
- port = listener.getsockname()[1]
- listener.listen(50)
-
- base_time = time.time()
-
- def server(my_results):
- sock, addr = listener.accept()
-
- datasize = 0
-
- t1 = None
- t2 = None
- try:
- while True:
- data = sock.recv(recvsize)
- if not t1:
- t1 = time.time() - base_time
- if not data:
- t2 = time.time() - base_time
- my_results.append(datasize)
- my_results.append((t1, t2))
- break
- datasize += len(data)
- finally:
- sock.close()
-
- def client():
- pid = os.fork()
- if pid:
- return pid
-
- client = _orig_sock.socket(socket.AF_INET, socket.SOCK_STREAM)
- client.connect(('127.0.0.1', port))
-
- bufsized(client, size=sendsize)
-
- for i in range(sendloops):
- client.sendall(b'*' * sendsize)
- client.close()
- os._exit(0)
-
- clients = []
- servers = []
- for r in results:
- servers.append(eventlet.spawn(server, r))
- for r in results:
- clients.append(client())
-
- for s in servers:
- s.wait()
- for c in clients:
- os.waitpid(c, 0)
-
- listener.close()
-
- # now test that all of the server receive intervals overlap, and
- # that there were no errors.
- for r in results:
- assert len(r) == 2, "length is %d not 2!: %s\n%s" % (len(r), r, results)
- assert r[0] == sendsize * sendloops
- assert len(r[1]) == 2
- assert r[1][0] is not None
- assert r[1][1] is not None
-
- starttimes = sorted(r[1][0] for r in results)
- endtimes = sorted(r[1][1] for r in results)
- runlengths = sorted(r[1][1] - r[1][0] for r in results)
-
- # assert that the last task started before the first task ended
- # (our no-starvation condition)
- assert starttimes[-1] < endtimes[0], \
- "Not overlapping: starts %s ends %s" % (starttimes, endtimes)
-
- maxstartdiff = starttimes[-1] - starttimes[0]
-
- assert maxstartdiff * 2 < runlengths[0], \
- "Largest difference in starting times more than twice the shortest running time!"
- assert runlengths[0] * 2 > runlengths[-1], \
- "Longest runtime more than twice as long as shortest!"
-
def test_set_nonblocking():
sock = _orig_sock.socket(socket.AF_INET, socket.SOCK_DGRAM)
@@ -954,3 +878,76 @@ def test_socket_del_fails_gracefully_when_not_fully_initialized():
def test_double_close_219():
tests.run_isolated('greenio_double_close_219.py')
+
+
+def test_partial_write_295():
+ # https://github.com/eventlet/eventlet/issues/295
+ # `socket.makefile('w').writelines()` must send all
+ # despite partial writes by underlying socket
+ listen_socket = eventlet.listen(('localhost', 0))
+ original_accept = listen_socket.accept
+
+ def talk(conn):
+ f = conn.makefile('wb')
+ line = b'*' * 2140
+ f.writelines([line] * 10000)
+ conn.close()
+
+ def accept():
+ connection, address = original_accept()
+ original_send = connection.send
+
+ def slow_send(b, *args):
+ b = b[:1031]
+ return original_send(b, *args)
+
+ connection.send = slow_send
+ eventlet.spawn(talk, connection)
+ return connection, address
+
+ listen_socket.accept = accept
+
+ eventlet.spawn(listen_socket.accept)
+ sock = eventlet.connect(listen_socket.getsockname())
+ with eventlet.Timeout(10):
+ bs = sock.makefile('rb').read()
+ assert len(bs) == 21400000
+ assert bs == (b'*' * 21400000)
+
+
+def test_socket_file_read_non_int():
+ listen_socket = eventlet.listen(('localhost', 0))
+
+ def server():
+ conn, _ = listen_socket.accept()
+ conn.recv(1)
+ conn.sendall(b'response')
+ conn.close()
+
+ eventlet.spawn(server)
+ sock = eventlet.connect(listen_socket.getsockname())
+
+ fd = sock.makefile('rwb')
+ fd.write(b'?')
+ fd.flush()
+ with eventlet.Timeout(1):
+ try:
+ fd.read("This shouldn't work")
+ assert False
+ except TypeError:
+ pass
+
+
+def test_pipe_context():
+ # ensure using a pipe as a context actually closes it.
+ r, w = os.pipe()
+ r = greenio.GreenPipe(r)
+ w = greenio.GreenPipe(w, 'w')
+
+ with r:
+ pass
+ assert r.closed and not w.closed
+
+ with w as f:
+ assert f == w
+ assert r.closed and w.closed
diff --git a/python-eventlet/tests/greenpipe_test_with_statement.py b/python-eventlet/tests/greenpipe_test_with_statement.py
deleted file mode 100644
index c0491b3..0000000
--- a/python-eventlet/tests/greenpipe_test_with_statement.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import with_statement
-
-import os
-
-from eventlet import greenio
-from tests import LimitedTestCase
-
-
-class TestGreenPipeWithStatement(LimitedTestCase):
- def test_pipe_context(self):
- # ensure using a pipe as a context actually closes it.
- r, w = os.pipe()
-
- r = greenio.GreenPipe(r)
- w = greenio.GreenPipe(w, 'w')
-
- with r:
- pass
-
- assert r.closed and not w.closed
-
- with w as f:
- assert f == w
-
- assert r.closed and w.closed
diff --git a/python-eventlet/tests/isolated/env_tpool_negative.py b/python-eventlet/tests/isolated/env_tpool_negative.py
new file mode 100644
index 0000000..82c02d4
--- /dev/null
+++ b/python-eventlet/tests/isolated/env_tpool_negative.py
@@ -0,0 +1,11 @@
+__test__ = False
+
+if __name__ == '__main__':
+ from eventlet import tpool
+
+ def do():
+ print("should not get here")
+ try:
+ tpool.execute(do)
+ except AssertionError:
+ print('pass')
diff --git a/python-eventlet/tests/isolated/env_tpool_size.py b/python-eventlet/tests/isolated/env_tpool_size.py
new file mode 100644
index 0000000..a34a9c7
--- /dev/null
+++ b/python-eventlet/tests/isolated/env_tpool_size.py
@@ -0,0 +1,26 @@
+__test__ = False
+
+if __name__ == '__main__':
+ import sys
+ import time
+ from eventlet import tpool
+ import eventlet
+
+ current = [0]
+ highwater = [0]
+
+ def count():
+ current[0] += 1
+ time.sleep(0.01)
+ if current[0] > highwater[0]:
+ highwater[0] = current[0]
+ current[0] -= 1
+
+ expected = int(sys.argv[1])
+ normal = int(sys.argv[2])
+ p = eventlet.GreenPool()
+ for i in range(expected * 2):
+ p.spawn(tpool.execute, count)
+ p.waitall()
+ assert highwater[0] > normal, "Highwater %s <= %s" % (highwater[0], normal)
+ print('pass')
diff --git a/python-eventlet/tests/isolated/env_tpool_zero.py b/python-eventlet/tests/isolated/env_tpool_zero.py
new file mode 100644
index 0000000..13fad9c
--- /dev/null
+++ b/python-eventlet/tests/isolated/env_tpool_zero.py
@@ -0,0 +1,22 @@
+__test__ = False
+
+if __name__ == '__main__':
+ import warnings
+ from eventlet import tpool
+ g = [False]
+
+ def do():
+ g[0] = True
+
+ with warnings.catch_warnings(record=True) as ws:
+ warnings.simplefilter('always')
+
+ tpool.execute(do)
+
+ assert len(ws) == 1
+ msg = str(ws[0].message)
+ assert 'Zero threads in tpool' in msg
+ assert 'EVENTLET_THREADPOOL_SIZE' in msg
+
+ assert g[0]
+ print('pass')
diff --git a/python-eventlet/tests/isolated/greendns_from_address_203.py b/python-eventlet/tests/isolated/greendns_from_address_203.py
index 5c8bb14..dbf5899 100644
--- a/python-eventlet/tests/isolated/greendns_from_address_203.py
+++ b/python-eventlet/tests/isolated/greendns_from_address_203.py
@@ -1,16 +1,13 @@
__test__ = False
-
-def main():
+if __name__ == '__main__':
+ import sys
import eventlet
try:
from dns import reversename
except ImportError:
print('skip:require dns (package dnspython)')
- return
+ sys.exit(1)
eventlet.monkey_patch(all=True)
reversename.from_address('127.0.0.1')
print('pass')
-
-if __name__ == '__main__':
- main()
diff --git a/python-eventlet/tests/isolated/greenio_double_close_219.py b/python-eventlet/tests/isolated/greenio_double_close_219.py
index 9881ce8..ab27d19 100644
--- a/python-eventlet/tests/isolated/greenio_double_close_219.py
+++ b/python-eventlet/tests/isolated/greenio_double_close_219.py
@@ -1,7 +1,6 @@
__test__ = False
-
-def main():
+if __name__ == '__main__':
import eventlet
eventlet.monkey_patch()
import subprocess
@@ -17,6 +16,3 @@ def main():
f.close() # OSError, because the fd 3 has already been closed
print('pass')
-
-if __name__ == '__main__':
- main()
diff --git a/python-eventlet/tests/isolated/mysqldb_monkey_patch.py b/python-eventlet/tests/isolated/mysqldb_monkey_patch.py
index 8522d06..da1e074 100644
--- a/python-eventlet/tests/isolated/mysqldb_monkey_patch.py
+++ b/python-eventlet/tests/isolated/mysqldb_monkey_patch.py
@@ -1,6 +1,3 @@
-from __future__ import print_function
-
-# no standard tests in this file, ignore
__test__ = False
if __name__ == '__main__':
diff --git a/python-eventlet/tests/isolated/patcher_blocking_select_methods_are_deleted.py b/python-eventlet/tests/isolated/patcher_blocking_select_methods_are_deleted.py
new file mode 100644
index 0000000..67da6a5
--- /dev/null
+++ b/python-eventlet/tests/isolated/patcher_blocking_select_methods_are_deleted.py
@@ -0,0 +1,34 @@
+__test__ = False
+
+if __name__ == '__main__':
+ import eventlet
+ eventlet.monkey_patch()
+
+ # Leaving unpatched select methods in the select module is a recipe
+ # for trouble and this test makes sure we don't do that.
+ #
+ # Issues:
+ # * https://bitbucket.org/eventlet/eventlet/issues/167
+ # * https://github.com/eventlet/eventlet/issues/169
+ import select
+ # FIXME: must also delete `poll`, but it breaks subprocess `communicate()`
+ # https://github.com/eventlet/eventlet/issues/290
+ for name in ['devpoll', 'epoll', 'kqueue', 'kevent']:
+ assert not hasattr(select, name), name
+
+ import sys
+
+ if sys.version_info >= (3, 4):
+ import selectors
+ for name in [
+ 'PollSelector',
+ 'EpollSelector',
+ 'DevpollSelector',
+ 'KqueueSelector',
+ ]:
+ assert not hasattr(selectors, name), name
+
+ default = selectors.DefaultSelector
+ assert default is selectors.SelectSelector, default
+
+ print('pass')
diff --git a/python-eventlet/tests/isolated/patcher_importlib_lock.py b/python-eventlet/tests/isolated/patcher_importlib_lock.py
index b6e5a11..922f857 100644
--- a/python-eventlet/tests/isolated/patcher_importlib_lock.py
+++ b/python-eventlet/tests/isolated/patcher_importlib_lock.py
@@ -1,11 +1,3 @@
-from __future__ import print_function
-
-import sys
-
-import eventlet
-
-
-# no standard tests in this file, ignore
__test__ = False
@@ -14,6 +6,9 @@ def do_import():
if __name__ == '__main__':
+ import sys
+ import eventlet
+
eventlet.monkey_patch()
threading = eventlet.patcher.original('threading')
diff --git a/python-eventlet/tests/isolated/patcher_socketserver_selectors.py b/python-eventlet/tests/isolated/patcher_socketserver_selectors.py
new file mode 100644
index 0000000..91b56cb
--- /dev/null
+++ b/python-eventlet/tests/isolated/patcher_socketserver_selectors.py
@@ -0,0 +1,30 @@
+__test__ = False
+
+if __name__ == '__main__':
+ import eventlet
+ eventlet.monkey_patch()
+
+ from eventlet.support.six.moves.BaseHTTPServer import (
+ HTTPServer,
+ BaseHTTPRequestHandler,
+ )
+ import threading
+
+ server = HTTPServer(('localhost', 0), BaseHTTPRequestHandler)
+ thread = threading.Thread(target=server.serve_forever)
+
+ # Before the fix the code would never get past this line because:
+ # * socketserver.BaseServer, which is used behind the scenes here, picks
+ # selectors.PollSelector when it is available, and eventlet has no green
+ # poll implementation, so this simply could not work
+ # * making socketserver use selectors.SelectSelector was not enough either,
+ # because until now we failed to monkey patch the selectors module at all
+ #
+ # Due to the issues above this thread.start() call effectively behaved
+ # like calling server.serve_forever() directly in the current thread
+ #
+ # Original report: https://github.com/eventlet/eventlet/issues/249
+ thread.start()
+
+ server.shutdown()
+ print('pass')
diff --git a/python-eventlet/tests/isolated/patcher_threading_condition.py b/python-eventlet/tests/isolated/patcher_threading_condition.py
index 5bce0f2..6390331 100644
--- a/python-eventlet/tests/isolated/patcher_threading_condition.py
+++ b/python-eventlet/tests/isolated/patcher_threading_condition.py
@@ -1,11 +1,8 @@
# Issue #185: test threading.Condition with monkey-patching
-import eventlet
-
-# no standard tests in this file, ignore
__test__ = False
-
if __name__ == '__main__':
+ import eventlet
eventlet.monkey_patch()
import threading
diff --git a/python-eventlet/tests/isolated/patcher_threading_join.py b/python-eventlet/tests/isolated/patcher_threading_join.py
index 4361f52..2d428c4 100644
--- a/python-eventlet/tests/isolated/patcher_threading_join.py
+++ b/python-eventlet/tests/isolated/patcher_threading_join.py
@@ -1,11 +1,8 @@
# Issue #223: test threading.Thread.join with monkey-patching
-import eventlet
-
-# no standard tests in this file, ignore
__test__ = False
-
if __name__ == '__main__':
+ import eventlet
eventlet.monkey_patch()
import threading
diff --git a/python-eventlet/tests/isolated/subprocess_patched_communicate.py b/python-eventlet/tests/isolated/subprocess_patched_communicate.py
new file mode 100644
index 0000000..1f17d89
--- /dev/null
+++ b/python-eventlet/tests/isolated/subprocess_patched_communicate.py
@@ -0,0 +1,11 @@
+__test__ = False
+
+if __name__ == '__main__':
+ import sys
+ import eventlet
+ import subprocess
+ eventlet.monkey_patch(all=True)
+ p = subprocess.Popen([sys.executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+
+ print('pass')
diff --git a/python-eventlet/tests/isolated/wsgi_connection_timeout.py b/python-eventlet/tests/isolated/wsgi_connection_timeout.py
index 6a8c623..c2c7d3c 100644
--- a/python-eventlet/tests/isolated/wsgi_connection_timeout.py
+++ b/python-eventlet/tests/isolated/wsgi_connection_timeout.py
@@ -42,6 +42,7 @@ class BufferLog(object):
@staticmethod
def write(s):
output_buffer.append(s.rstrip())
+ return len(s)
# This test might make you wince
diff --git a/python-eventlet/tests/openssl_test.py b/python-eventlet/tests/openssl_test.py
new file mode 100644
index 0000000..a4498dc
--- /dev/null
+++ b/python-eventlet/tests/openssl_test.py
@@ -0,0 +1,17 @@
+import tests
+
+
+def test_import():
+ # https://github.com/eventlet/eventlet/issues/238
+ # Ensure that it's possible to import eventlet.green.OpenSSL.
+ # Most basic test to check Python 3 compatibility.
+ try:
+ import OpenSSL
+ except ImportError:
+ raise tests.SkipTest('need pyopenssl')
+
+ import eventlet.green.OpenSSL.SSL
+ import eventlet.green.OpenSSL.crypto
+ import eventlet.green.OpenSSL.rand
+ import eventlet.green.OpenSSL.tsafe
+ import eventlet.green.OpenSSL.version
diff --git a/python-eventlet/tests/patcher_test.py b/python-eventlet/tests/patcher_test.py
index 2e458c5..7ec12b1 100644
--- a/python-eventlet/tests/patcher_test.py
+++ b/python-eventlet/tests/patcher_test.py
@@ -291,18 +291,20 @@ import time
self.assertEqual(len(lines), 2, "\n".join(lines))
-class Subprocess(ProcessBase):
- def test_monkeypatched_subprocess(self):
- new_mod = """import eventlet
+def test_subprocess_after_monkey_patch():
+ code = '''\
+import sys
+import eventlet
eventlet.monkey_patch()
from eventlet.green import subprocess
-
-subprocess.Popen(['true'], stdin=subprocess.PIPE)
-print("done")
-"""
- self.write_to_tempfile("newmod", new_mod)
- output, lines = self.launch_subprocess('newmod')
- self.assertEqual(output, "done\n", output)
+subprocess.Popen([sys.executable, '-c', ''], stdin=subprocess.PIPE).wait()
+print('pass')
+'''
+ output = tests.run_python(
+ path=None,
+ args=['-c', code],
+ )
+ assert output.rstrip() == b'pass'
class Threading(ProcessBase):
@@ -324,8 +326,8 @@ print(len(_threading._active))
output, lines = self.launch_subprocess('newmod')
self.assertEqual(len(lines), 4, "\n".join(lines))
assert lines[0].startswith('