def _get_oslo_configs():
- """Returns the oslo.config options to register."""
+ """Returns the oslo config options to register."""
# NOTE(flaper87): Oslo config should be
# optional. Instead of doing try / except
# at the top of this file, lets import cfg
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
- _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+ _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
- LOG.debug("Reloading cached file %s" % filename)
+ LOG.debug("Reloading cached file %s", filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
break
delay = end - start - interval
if delay > 0:
- LOG.warn(_LW('task %(func_name)r run outlasted '
- 'interval by %(delay).2f sec'),
- {'func_name': self.f, 'delay': delay})
+ LOG.warning(_LW('task %(func_name)r run outlasted '
+ 'interval by %(delay).2f sec'),
+ {'func_name': self.f, 'delay': delay})
greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
try:
task(self, context)
- except Exception as e:
+ except Exception:
if raise_on_error:
raise
- LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
- {"full_task_name": full_task_name, "e": e})
+ LOG.exception(_LE("Error during %(full_task_name)s"),
+ {"full_task_name": full_task_name})
time.sleep(0)
return idle_for
"""Generic Node base class for all workers that run on hosts."""
import errno
+import io
import logging
import os
import random
import sys
import time
-try:
- # Importing just the symbol here because the io module does not
- # exist in Python 2.6.
- from io import UnsupportedOperation # noqa
-except ImportError:
- # Python 2.6
- UnsupportedOperation = None
-
import eventlet
from eventlet import event
from oslo_config import cfg
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+ except io.UnsupportedOperation:
+ # Could not get the fileno for stdout, so we must be a daemon.
+ is_daemon = True
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
- except UnsupportedOperation:
- # Could not get the fileno for stdout, so we must be a daemon.
- is_daemon = True
return is_daemon
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
- self.readpipe.read()
+ self.readpipe.read(1)
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
def _child_process_handle_signal(self):
# Setup child signal handlers differently
- def _sigterm(*args):
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
- raise SignalExit(signal.SIGTERM)
-
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
- signal.signal(signal.SIGTERM, _sigterm)
+ # Parent signals with SIGTERM when it wants us to go away.
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
import eventlet
from eventlet import greenpool
+from neutron.openstack.common._i18n import _LE
from neutron.openstack.common import loopingcall
x.stop()
except eventlet.greenlet.GreenletExit:
pass
- except Exception as ex:
- LOG.exception(ex)
+ except Exception:
+ LOG.exception(_LE('Error stopping thread.'))
def stop_timers(self):
for x in self.timers:
try:
x.stop()
- except Exception as ex:
- LOG.exception(ex)
+ except Exception:
+ LOG.exception(_LE('Error stopping timer.'))
self.timers = []
def stop(self, graceful=False):
x.wait()
except eventlet.greenlet.GreenletExit:
pass
- except Exception as ex:
- LOG.exception(ex)
+ except Exception:
+ LOG.exception(_LE('Error waiting on ThreadGroup.'))
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
module=cache
module=eventlet_backdoor
module=fileutils
-module=install_venv_common
+# The following module is not synchronized by the update.sh script since it
+# is located in tools/, not neutron/openstack/common/. Left here to make it
+# explicit that we still ship code from the incubator here.
+#module=install_venv_common
module=loopingcall
module=periodic_task
module=service
module=systemd
module=threadgroup
-module=uuidutils
# The base module to hold the copy of openstack.common
base=neutron
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
- # setuptools.
- self.pip_install('pip>=1.3')
+ # setuptools and pbr
+ self.pip_install('pip>=1.4')
self.pip_install('setuptools')
+ self.pip_install('pbr')
- self.pip_install('-r', self.requirements)
- self.pip_install('-r', self.test_requirements)
+ self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
- "install")
+ "install.")
return parser.parse_args(argv[1:])[0]