10 from eventlet import greenio
11 from eventlet import greenpool
12 from eventlet import support
13 from eventlet.green import BaseHTTPServer
14 from eventlet.green import socket
15 from eventlet.support import six
16 from eventlet.support.six.moves import urllib
# Server tunables.  The request-line and header limits guard against
# oversized (potentially malicious) requests.
DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
MAX_REQUEST_LINE = 8192
MAX_HEADER_LINE = 8192
MAX_TOTAL_HEADER_SIZE = 65536
MINIMUM_CHUNK_SIZE = 4096
# %(client_port)s is also available
DEFAULT_LOG_FORMAT = ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
                      ' %(status_code)s %(body_length)s %(wall_seconds).6f')
30 __all__ = ['server', 'format_date_time']
# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None,  # Dummy so we can use 1-based month numbers
              "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]


def format_date_time(timestamp):
    """Formats a unix timestamp into an HTTP standard string."""
    year, month, day, hh, mm, ss, wd, _y, _z = time.gmtime(timestamp)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        _weekdayname[wd], day, _monthname[month], year, hh, mm, ss
    )
# Collections of error codes to compare against.  Not all attributes are set
# on errno module on all platforms, so some are literals :(
# 10053 is presumably WSAECONNABORTED (Windows) -- not exposed via errno there.
BAD_SOCK = {errno.EBADF, 10053}
BROKEN_SOCK = {errno.EPIPE, errno.ECONNRESET}
53 class ChunkReadError(ValueError):
# special flag return value for apps
class _AlreadyHandled(object):
    """Sentinel returned by applications that have fully handled the
    request/response themselves; iterating it yields nothing."""

    def __iter__(self):
        return self

    def next(self):
        raise StopIteration

    # Python 3 iterator protocol; `next` kept for Python 2 callers.
    __next__ = next


ALREADY_HANDLED = _AlreadyHandled()
84 if content_length is not None:
85 content_length = int(content_length)
86 self.content_length = content_length
89 self.wfile_line = wfile_line
92 self.chunked_input = chunked_input
93 self.chunk_length = -1
95 # (optional) headers to send with a "100 Continue" response. Set by
96 # calling set_hundred_continue_respose_headers() on env['wsgi.input']
97 self.hundred_continue_headers = None
98 self.is_hundred_continue_response_sent = False
100 def send_hundred_continue_response(self):
103 # 100 Continue status line
104 towrite.append(self.wfile_line)
107 if self.hundred_continue_headers is not None:
108 # 100 Continue headers
109 for header in self.hundred_continue_headers:
110 towrite.append(six.b('%s: %s\r\n' % header))
113 towrite.append(b'\r\n')
115 self.wfile.writelines(towrite)
118 # Reinitialize chunk_length (expect more data)
119 self.chunk_length = -1
121 def _do_read(self, reader, length=None):
122 if self.wfile is not None and not self.is_hundred_continue_response_sent:
123 # 100 Continue response
124 self.send_hundred_continue_response()
125 self.is_hundred_continue_response_sent = True
126 if (self.content_length is not None) and (
127 length is None or length > self.content_length - self.position):
128 length = self.content_length - self.position
132 read = reader(length)
133 except greenio.SSL.ZeroReturnError:
135 self.position += len(read)
138 def _chunked_read(self, rfile, length=None, use_readline=False):
139 if self.wfile is not None and not self.is_hundred_continue_response_sent:
140 # 100 Continue response
141 self.send_hundred_continue_response()
142 self.is_hundred_continue_response_sent = True
147 if length and length < 0:
151 reader = self.rfile.readline
153 reader = self.rfile.read
156 while self.chunk_length != 0:
157 maxreadlen = self.chunk_length - self.position
158 if length is not None and length < maxreadlen:
162 data = reader(maxreadlen)
164 self.chunk_length = 0
165 raise IOError("unexpected end of file while parsing chunked data")
168 response.append(data)
170 self.position += datalen
171 if self.chunk_length == self.position:
174 if length is not None:
178 if use_readline and data[-1] == "\n":
182 self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16)
183 except ValueError as err:
184 raise ChunkReadError(err)
186 if self.chunk_length == 0:
188 except greenio.SSL.ZeroReturnError:
190 return b''.join(response)
192 def read(self, length=None):
193 if self.chunked_input:
194 return self._chunked_read(self.rfile, length)
195 return self._do_read(self.rfile.read, length)
197 def readline(self, size=None):
198 if self.chunked_input:
199 return self._chunked_read(self.rfile, size, True)
201 return self._do_read(self.rfile.readline, size)
203 def readlines(self, hint=None):
204 return self._do_read(self.rfile.readlines, hint)
207 return iter(self.read, b'')
209 def get_socket(self):
212 def set_hundred_continue_response_headers(self, headers,
213 capitalize_response_headers=True):
214 # Response headers capitalization (default)
215 # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
216 # Per HTTP RFC standard, header name is case-insensitive.
217 # Please, fix your client to ignore header case if possible.
218 if capitalize_response_headers:
220 ('-'.join([x.capitalize() for x in key.split('-')]), value)
221 for key, value in headers]
222 self.hundred_continue_headers = headers
224 def discard(self, buffer_size=16 << 10):
225 while self.read(buffer_size):
229 class HeaderLineTooLong(Exception):
233 class HeadersTooLarge(Exception):
def get_logger(log, debug):
    """Return *log* if it is already logger-like, else wrap it.

    Anything with callable ``info`` and ``debug`` attributes is used as-is;
    plain file-like objects are adapted via LoggerFileWrapper.
    """
    if callable(getattr(log, 'info', None)) \
            and callable(getattr(log, 'debug', None)):
        return log
    else:
        return LoggerFileWrapper(log, debug)
class LoggerFileWrapper(object):
    """Adapt a plain file-like object to the logger interface the server
    expects (``error``/``info``/``debug``)."""

    def __init__(self, log, debug):
        self.log = log
        # whether debug() calls are written at all
        self._debug = debug

    def error(self, msg, *args, **kwargs):
        self.write(msg, *args)

    def info(self, msg, *args, **kwargs):
        self.write(msg, *args)

    def debug(self, msg, *args, **kwargs):
        if self._debug:
            self.write(msg, *args)

    def write(self, msg, *args):
        # %-interpolate lazily, append a newline, and emit to the file
        msg = msg + '\n'
        if args:
            msg = msg % args
        self.log.write(msg)
class FileObjectForHeaders(object):
    """Wrap the request rfile while headers are parsed, enforcing the
    per-line (MAX_HEADER_LINE) and total (MAX_TOTAL_HEADER_SIZE) limits."""

    def __init__(self, fp):
        self.fp = fp
        # running total of header bytes read so far
        self.total_header_size = 0

    def readline(self, size=-1):
        # never read more than one maximal header line at a time
        sz = size
        if sz < 0:
            sz = MAX_HEADER_LINE
        rv = self.fp.readline(sz)
        if len(rv) >= MAX_HEADER_LINE:
            raise HeaderLineTooLong()
        self.total_header_size += len(rv)
        if self.total_header_size > MAX_TOTAL_HEADER_SIZE:
            raise HeadersTooLarge()
        return rv
class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
    """Per-connection HTTP handler that drives the WSGI application."""

    protocol_version = 'HTTP/1.1'
    minimum_chunk_size = MINIMUM_CHUNK_SIZE
    capitalize_response_headers = True

    # https://github.com/eventlet/eventlet/issues/295
    # Stdlib default is 0 (unbuffered), but then `wfile.writelines()` loses data
    # so before going back to unbuffered, remove any usage of `writelines`.
    wbufsize = 16 << 10
297 # overriding SocketServer.setup to correctly handle SSL.Connection objects
298 conn = self.connection = self.request
300 # TCP_QUICKACK is a better alternative to disabling Nagle's algorithm
301 # https://news.ycombinator.com/item?id=10607422
302 if getattr(socket, 'TCP_QUICKACK', None):
304 conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True)
309 self.rfile = conn.makefile('rb', self.rbufsize)
310 self.wfile = conn.makefile('wb', self.wbufsize)
311 except (AttributeError, NotImplementedError):
312 if hasattr(conn, 'send') and hasattr(conn, 'recv'):
313 # it's an SSL.Connection
314 self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
315 self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
317 # it's a SSLObject, or a martian
318 raise NotImplementedError("wsgi.py doesn't support sockets "
319 "of type %s" % type(conn))
321 def handle_one_request(self):
322 if self.server.max_http_version:
323 self.protocol_version = self.server.max_http_version
325 if self.rfile.closed:
326 self.close_connection = 1
330 self.raw_requestline = self.rfile.readline(self.server.url_length_limit)
331 if len(self.raw_requestline) == self.server.url_length_limit:
333 b"HTTP/1.0 414 Request URI Too Long\r\n"
334 b"Connection: close\r\nContent-length: 0\r\n\r\n")
335 self.close_connection = 1
337 except greenio.SSL.ZeroReturnError:
338 self.raw_requestline = ''
339 except socket.error as e:
340 if support.get_errno(e) not in BAD_SOCK:
342 self.raw_requestline = ''
344 if not self.raw_requestline:
345 self.close_connection = 1
348 orig_rfile = self.rfile
350 self.rfile = FileObjectForHeaders(self.rfile)
351 if not self.parse_request():
353 except HeaderLineTooLong:
355 b"HTTP/1.0 400 Header Line Too Long\r\n"
356 b"Connection: close\r\nContent-length: 0\r\n\r\n")
357 self.close_connection = 1
359 except HeadersTooLarge:
361 b"HTTP/1.0 400 Headers Too Large\r\n"
362 b"Connection: close\r\nContent-length: 0\r\n\r\n")
363 self.close_connection = 1
366 self.rfile = orig_rfile
368 content_length = self.headers.get('content-length')
374 b"HTTP/1.0 400 Bad Request\r\n"
375 b"Connection: close\r\nContent-length: 0\r\n\r\n")
376 self.close_connection = 1
379 self.environ = self.get_environ()
380 self.application = self.server.app
382 self.server.outstanding_requests += 1
384 self.handle_one_response()
385 except socket.error as e:
386 # Broken pipe, connection reset by peer
387 if support.get_errno(e) not in BROKEN_SOCK:
390 self.server.outstanding_requests -= 1
392 def handle_one_response(self):
399 use_chunked = [False]
406 raise AssertionError("write() before start_response()")
407 elif not headers_sent:
408 status, response_headers = headers_set
409 headers_sent.append(1)
410 header_list = [header[0].lower() for header in response_headers]
411 towrite.append(six.b('%s %s\r\n' % (self.protocol_version, status)))
412 for header in response_headers:
413 towrite.append(six.b('%s: %s\r\n' % header))
416 if 'date' not in header_list:
417 towrite.append(six.b('Date: %s\r\n' % (format_date_time(time.time()),)))
419 client_conn = self.headers.get('Connection', '').lower()
420 send_keep_alive = False
421 if self.close_connection == 0 and \
422 self.server.keepalive and (client_conn == 'keep-alive' or
423 (self.request_version == 'HTTP/1.1' and
424 not client_conn == 'close')):
425 # only send keep-alives back to clients that sent them,
426 # it's redundant for 1.1 connections
427 send_keep_alive = (client_conn == 'keep-alive')
428 self.close_connection = 0
430 self.close_connection = 1
432 if 'content-length' not in header_list:
433 if self.request_version == 'HTTP/1.1':
434 use_chunked[0] = True
435 towrite.append(b'Transfer-Encoding: chunked\r\n')
436 elif 'content-length' not in header_list:
437 # client is 1.0 and therefore must read to EOF
438 self.close_connection = 1
440 if self.close_connection:
441 towrite.append(b'Connection: close\r\n')
442 elif send_keep_alive:
443 towrite.append(b'Connection: keep-alive\r\n')
444 towrite.append(b'\r\n')
445 # end of header writing
448 # Write the chunked encoding
449 towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n")
452 wfile.writelines(towrite)
454 length[0] = length[0] + sum(map(len, towrite))
456 def start_response(status, response_headers, exc_info=None):
457 status_code[0] = status.split()[0]
461 # Re-raise original exception if headers sent
462 six.reraise(exc_info[0], exc_info[1], exc_info[2])
464 # Avoid dangling circular ref
467 # Response headers capitalization
468 # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
469 # Per HTTP RFC standard, header name is case-insensitive.
470 # Please, fix your client to ignore header case if possible.
471 if self.capitalize_response_headers:
473 ('-'.join([x.capitalize() for x in key.split('-')]), value)
474 for key, value in response_headers]
476 headers_set[:] = [status, response_headers]
481 result = self.application(self.environ, start_response)
482 if (isinstance(result, _AlreadyHandled)
483 or isinstance(getattr(result, '_obj', None), _AlreadyHandled)):
484 self.close_connection = 1
487 # Set content-length if possible
488 if not headers_sent and hasattr(result, '__len__') and \
489 'Content-Length' not in [h for h, _v in headers_set[1]]:
490 headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
494 just_written_size = 0
495 minimum_write_chunk_size = int(self.environ.get(
496 'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
500 if isinstance(data, six.text_type):
501 data = data.encode('ascii')
504 towrite_size += len(data)
505 if towrite_size >= minimum_write_chunk_size:
506 write(b''.join(towrite))
508 just_written_size = towrite_size
511 just_written_size = towrite_size
512 write(b''.join(towrite))
513 if not headers_sent or (use_chunked[0] and just_written_size):
516 self.close_connection = 1
517 tb = traceback.format_exc()
518 self.server.log.info(tb)
520 err_body = six.b(tb) if self.server.debug else b''
521 start_response("500 Internal Server Error",
522 [('Content-type', 'text/plain'),
523 ('Content-length', len(err_body))])
526 if hasattr(result, 'close'):
528 request_input = self.environ['eventlet.input']
529 if (request_input.chunked_input or
530 request_input.position < (request_input.content_length or 0)):
531 # Read and discard body if there was no pending 100-continue
532 if not request_input.wfile and self.close_connection == 0:
534 request_input.discard()
535 except ChunkReadError as e:
536 self.close_connection = 1
537 self.server.log.error((
538 'chunked encoding error while discarding request body.'
539 + ' ip={0} request="{1}" error="{2}"').format(
540 self.get_client_ip(), self.requestline, e,
544 for hook, args, kwargs in self.environ['eventlet.posthooks']:
545 hook(self.environ, *args, **kwargs)
547 if self.server.log_output:
548 self.server.log.info(self.server.log_format % {
549 'client_ip': self.get_client_ip(),
550 'client_port': self.client_address[1],
551 'date_time': self.log_date_time_string(),
552 'request_line': self.requestline,
553 'status_code': status_code[0],
554 'body_length': length[0],
555 'wall_seconds': finish - start,
558 def get_client_ip(self):
559 client_ip = self.client_address[0]
560 if self.server.log_x_forwarded_for:
561 forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
563 client_ip = "%s,%s" % (forward, client_ip)
566 def get_environ(self):
567 env = self.server.get_environ()
568 env['REQUEST_METHOD'] = self.command
569 env['SCRIPT_NAME'] = ''
571 pq = self.path.split('?', 1)
572 env['RAW_PATH_INFO'] = pq[0]
573 env['PATH_INFO'] = urllib.parse.unquote(pq[0])
575 env['QUERY_STRING'] = pq[1]
577 ct = self.headers.get('content-type')
580 ct = self.headers.type
581 except AttributeError:
582 ct = self.headers.get_content_type()
583 env['CONTENT_TYPE'] = ct
585 length = self.headers.get('content-length')
587 env['CONTENT_LENGTH'] = length
588 env['SERVER_PROTOCOL'] = 'HTTP/1.0'
590 host, port = self.request.getsockname()[:2]
591 env['SERVER_NAME'] = host
592 env['SERVER_PORT'] = str(port)
593 env['REMOTE_ADDR'] = self.client_address[0]
594 env['REMOTE_PORT'] = str(self.client_address[1])
595 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
598 headers = self.headers.headers
599 except AttributeError:
600 headers = self.headers._headers
602 headers = [h.split(':', 1) for h in headers]
605 k = k.replace('-', '_').upper()
615 if env.get('HTTP_EXPECT') == '100-continue':
617 wfile_line = b'HTTP/1.1 100 Continue\r\n'
621 chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
622 env['wsgi.input'] = env['eventlet.input'] = Input(
623 self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
624 chunked_input=chunked)
625 env['eventlet.posthooks'] = []
631 BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
632 except socket.error as e:
633 # Broken pipe, connection reset by peer
634 if support.get_errno(e) not in BROKEN_SOCK:
636 greenio.shutdown_safe(self.connection)
637 self.connection.close()
639 def handle_expect_100(self):
class Server(BaseHTTPServer.HTTPServer):
    """WSGI server object; holds configuration shared by all connections."""

    def __init__(self,
                 socket,
                 address,
                 app,
                 log=None,
                 environ=None,
                 max_http_version=None,
                 protocol=HttpProtocol,
                 minimum_chunk_size=None,
                 log_x_forwarded_for=True,
                 keepalive=True,
                 log_output=True,
                 log_format=DEFAULT_LOG_FORMAT,
                 url_length_limit=MAX_REQUEST_LINE,
                 debug=True,
                 socket_timeout=None,
                 capitalize_response_headers=True):

        self.outstanding_requests = 0
        self.socket = socket
        self.address = address
        if log:
            self.log = get_logger(log, debug)
        else:
            self.log = get_logger(sys.stderr, debug)
        self.app = app
        self.keepalive = keepalive
        self.environ = environ
        self.max_http_version = max_http_version
        self.protocol = protocol
        self.pid = os.getpid()
        self.minimum_chunk_size = minimum_chunk_size
        self.log_x_forwarded_for = log_x_forwarded_for
        self.log_output = log_output
        self.log_format = log_format
        self.url_length_limit = url_length_limit
        self.debug = debug
        self.socket_timeout = socket_timeout
        self.capitalize_response_headers = capitalize_response_headers

        if not self.capitalize_response_headers:
            warnings.warn("""capitalize_response_headers is disabled.
 Please, make sure you know what you are doing.
 HTTP headers names are case-insensitive per RFC standard.
 Most likely, you need to fix HTTP parsing in your client software.""",
                          DeprecationWarning, stacklevel=3)
692 def get_environ(self):
694 'wsgi.errors': sys.stderr,
695 'wsgi.version': (1, 0),
696 'wsgi.multithread': True,
697 'wsgi.multiprocess': False,
698 'wsgi.run_once': False,
699 'wsgi.url_scheme': 'http',
701 # detect secure socket
702 if hasattr(self.socket, 'do_handshake'):
703 d['wsgi.url_scheme'] = 'https'
705 if self.environ is not None:
706 d.update(self.environ)
709 def process_request(self, sock_params):
710 # The actual request handling takes place in __init__, so we need to
711 # set minimum_chunk_size before __init__ executes and we don't want to modify
713 sock, address = sock_params
714 proto = new(self.protocol)
715 if self.minimum_chunk_size is not None:
716 proto.minimum_chunk_size = self.minimum_chunk_size
717 proto.capitalize_response_headers = self.capitalize_response_headers
719 proto.__init__(sock, address, self)
720 except socket.timeout:
721 # Expected exceptions are not exceptional
723 # similar to logging "accepted" in server()
724 self.log.debug('(%s) timed out %r' % (self.pid, address))
726 def log_message(self, message):
727 warnings.warn('server.log_message is deprecated. Please use server.log.info instead')
728 self.log.info(message)
try:
    # Python 2: create an uninitialized old-style instance
    new = types.InstanceType
except AttributeError:
    # Python 3: allocate without running __init__; process_request()
    # calls __init__ explicitly afterwards
    new = lambda cls: cls.__new__(cls)
try:
    import ssl

    ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
    ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET,
                        ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL))
except ImportError:
    # ssl module unavailable: fall back to plain socket errors only
    ACCEPT_EXCEPTIONS = (socket.error,)
    ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET))
def socket_repr(sock):
    """Return a URL-ish description of *sock* for log messages."""
    scheme = 'http'
    if hasattr(sock, 'do_handshake'):
        scheme = 'https'

    name = sock.getsockname()
    if sock.family == socket.AF_INET:
        hier_part = '//{0}:{1}'.format(*name)
    elif sock.family == socket.AF_INET6:
        hier_part = '//[{0}]:{1}'.format(*name[:2])
    elif sock.family == socket.AF_UNIX:
        hier_part = name
    else:
        hier_part = repr(name)

    return scheme + ':' + hier_part
def server(sock, site,
           log=None,
           environ=None,
           max_size=None,
           max_http_version=DEFAULT_MAX_HTTP_VERSION,
           protocol=HttpProtocol,
           server_event=None,
           minimum_chunk_size=None,
           log_x_forwarded_for=True,
           custom_pool=None,
           keepalive=True,
           log_output=True,
           log_format=DEFAULT_LOG_FORMAT,
           url_length_limit=MAX_REQUEST_LINE,
           debug=True,
           socket_timeout=None,
           capitalize_response_headers=True):
    """Start up a WSGI server handling requests from the supplied server
    socket. This function loops forever. The *sock* object will be
    closed after server exits, but the underlying file descriptor will
    remain open, so if you have a dup() of *sock*, it will remain usable.

    .. warning::

        At the moment :func:`server` will always wait for active connections to finish before
        exiting, even if there's an exception raised inside it
        (*all* exceptions are handled the same way, including :class:`greenlet.GreenletExit`
        and those inheriting from `BaseException`).

        While this may not be an issue normally, when it comes to long running HTTP connections
        (like :mod:`eventlet.websocket`) it will become problematic and calling
        :meth:`~eventlet.greenthread.GreenThread.wait` on a thread that runs the server may hang,
        even after using :meth:`~eventlet.greenthread.GreenThread.kill`, as long
        as there are active connections.

    :param sock: Server socket, must be already bound to a port and listening.
    :param site: WSGI application function.
    :param log: File-like object that logs should be written to.
        If not specified, sys.stderr is used.
    :param environ: Additional parameters that go into the environ dictionary of every request.
    :param max_size: Maximum number of client connections opened at any time by this server.
        Default is 1024.
    :param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0.
        This can help with applications or clients that don't behave properly using HTTP 1.1.
    :param protocol: Protocol class. Deprecated.
    :param server_event: Used to collect the Server object. Deprecated.
    :param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve
        performance of applications which yield many small strings, though
        using it technically violates the WSGI spec. This can be overridden
        on a per request basis by setting environ['eventlet.minimum_write_chunk_size'].
    :param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for
        header in addition to the actual client ip address in the 'client_ip' field of the
        log line.
    :param custom_pool: A custom GreenPool instance which is used to spawn client green threads.
        If this is supplied, max_size is ignored.
    :param keepalive: If set to False, disables keepalives on the server; all connections will be
        closed after serving one request.
    :param log_output: A Boolean indicating if the server will log data or not.
    :param log_format: A python format string that is used as the template to generate log lines.
        The following values can be formatted into it: client_ip, date_time, request_line,
        status_code, body_length, wall_seconds. The default is a good example of how to
        use it.
    :param url_length_limit: A maximum allowed length of the request url. If exceeded, 414 error
        is returned.
    :param debug: True if the server should send exception tracebacks to the clients on 500 errors.
        If False, the server will respond with empty bodies.
    :param socket_timeout: Timeout for client connections' socket operations. Default None means
        wait forever.
    :param capitalize_response_headers: Normalize response headers' names to Foo-Bar.
        Default is True.
    """
    serv = Server(sock, sock.getsockname(),
                  site, log,
                  environ=environ,
                  max_http_version=max_http_version,
                  protocol=protocol,
                  minimum_chunk_size=minimum_chunk_size,
                  log_x_forwarded_for=log_x_forwarded_for,
                  keepalive=keepalive,
                  log_output=log_output,
                  log_format=log_format,
                  url_length_limit=url_length_limit,
                  debug=debug,
                  socket_timeout=socket_timeout,
                  capitalize_response_headers=capitalize_response_headers,
                  )
    if server_event is not None:
        # Deprecated: hand the Server object back to the caller.
        server_event.send(serv)
    if max_size is None:
        max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    if custom_pool is not None:
        pool = custom_pool
    else:
        pool = greenpool.GreenPool(max_size)
    is_accepting = True
    try:
        serv.log.info("(%s) wsgi starting up on %s" % (
            serv.pid, socket_repr(sock)))
        while is_accepting:
            try:
                client_socket = sock.accept()
                client_socket[0].settimeout(serv.socket_timeout)
                serv.log.debug("(%s) accepted %r" % (
                    serv.pid, client_socket[1]))
                try:
                    pool.spawn_n(serv.process_request, client_socket)
                except AttributeError:
                    warnings.warn("wsgi's pool should be an instance of "
                                  "eventlet.greenpool.GreenPool, is %s. Please convert your"
                                  " call site to use GreenPool instead" % type(pool),
                                  DeprecationWarning, stacklevel=2)
                    pool.execute_async(serv.process_request, client_socket)
            except ACCEPT_EXCEPTIONS as e:
                if support.get_errno(e) not in ACCEPT_ERRNO:
                    raise
            except (KeyboardInterrupt, SystemExit):
                serv.log.info("wsgi exiting")
                break
    finally:
        is_accepting = False
        pool.waitall()
        serv.log.info("(%s) wsgi exited, is_accepting=%s" % (
            serv.pid, is_accepting))
        try:
            # NOTE: It's not clear whether we want this to leave the
            # socket open or close it. Use cases like Spawning want
            # the underlying fd to remain open, but if we're going
            # that far we might as well not bother closing sock at
            # all.
            sock.close()
        except socket.error as e:
            if support.get_errno(e) not in BROKEN_SOCK:
                traceback.print_exc()