help=_("TCP Port used by Neutron metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
- default=3,
- help=_("Send this many gratuitous ARPs for HA setup, "
- "set it below or equal to 0 to disable this "
- "feature.")),
+ default=0,
+ help=_("Send this many gratuitous ARPs for HA setup, if "
+ "less than or equal to 0, the feature is disabled")),
cfg.BoolOpt('use_namespaces', default=True,
help=_("Allow overlapping IP.")),
cfg.StrOpt('router_id', default='',
self._spawn_metadata_proxy(ri)
def _router_removed(self, router_id):
- ri = self.router_info[router_id]
+ ri = self.router_info.get(router_id)
+ if ri is None:
+ LOG.warn(_("Info for router %s were not found. "
+ "Skipping router removal"), router_id)
+ return
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
# so we can clear the value of updated_routers
# and removed_routers
try:
+ LOG.debug(_("Starting RPC loop for %d updated routers"),
+ len(self.updated_routers))
if self.updated_routers:
router_ids = list(self.updated_routers)
self.updated_routers.clear()
self.context, router_ids)
self._process_routers(routers)
self._process_router_delete()
+ LOG.debug(_("RPC loop successfully completed"))
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
def _sync_routers_task(self, context):
if self.services_sync:
super(L3NATAgent, self).process_services_sync(context)
+ LOG.debug(_("Starting _sync_routers_task - fullsync:%s"),
+ self.fullsync)
if not self.fullsync:
return
try:
LOG.debug(_('Processing :%r'), routers)
self._process_routers(routers, all_routers=True)
self.fullsync = False
+ LOG.debug(_("_sync_routers_task successfully completed"))
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
self.heartbeat.start(interval=report_interval)
def _report_state(self):
+ LOG.debug(_("Report state task started"))
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
+ LOG.debug(_("Report state task successfully completed"))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
import tempfile
from eventlet.green import subprocess
+from eventlet import greenthread
from neutron.common import utils
from neutron.openstack.common import log as logging
env = os.environ.copy()
if addl_env:
env.update(addl_env)
- obj = utils.subprocess_popen(cmd, shell=False,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=env)
-
- _stdout, _stderr = (process_input and
- obj.communicate(process_input) or
- obj.communicate())
- obj.stdin.close()
- m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
- "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
- 'stdout': _stdout, 'stderr': _stderr}
- LOG.debug(m)
- if obj.returncode and check_exit_code:
- raise RuntimeError(m)
+ try:
+ obj = utils.subprocess_popen(cmd, shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env)
+ _stdout, _stderr = (process_input and
+ obj.communicate(process_input) or
+ obj.communicate())
+ obj.stdin.close()
+ m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
+ "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
+ 'stdout': _stdout, 'stderr': _stderr}
+ LOG.debug(m)
+ if obj.returncode and check_exit_code:
+ raise RuntimeError(m)
+ finally:
+ # NOTE(termie): this appears to be necessary to let the subprocess
+ # call clean something up in between calls, without
+ # it two execute calls in a row hang the second one
+ greenthread.sleep(0)
return return_stderr and (_stdout, _stderr) or _stdout