# and not through this parameter.
# ipv6_gateway =
+# (StrOpt) Driver used for ipv6 prefix delegation. This needs to be
+# an entry point defined in the neutron.agent.linux.pd_drivers namespace. See
+# setup.cfg for entry points included with the neutron source.
+# prefix_delegation_driver = dibbler
+
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
--- /dev/null
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# Filters for the dibbler-based reference implementation of the pluggable
+# Prefix Delegation driver. Other implementations using an alternative agent
+# should include a similar filter in this folder.
+
+# prefix_delegation_agent
+dibbler-client: CommandFilter, dibbler-client, root
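
For illustration only (not part of this change): an alternative PD driver that shells out to its own client binary would ship an analogous filter, following the same "cmd-name: filter-name, raw-command, user, args" format. The binary name below is hypothetical.

example-pd-client: CommandFilter, example-pd-client, root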
from neutron.agent.l3 import router_processing_queue as queue
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
+from neutron.agent.linux import pd
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.callbacks import events
1.4 - Added L3 HA update_router_state. This method was reworked in
      1.5 to update_ha_routers_states
1.5 - Added update_ha_routers_states
+ 1.6 - Added process_prefix_update
"""
return cctxt.call(context, 'update_ha_routers_states',
host=self.host, states=states)
+ def process_prefix_update(self, context, prefix_update):
+ """Process prefix update whenever prefixes get changed."""
+ cctxt = self.client.prepare(version='1.6')
+ return cctxt.call(context, 'process_prefix_update',
+ subnets=prefix_update)
+
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
self.target_ex_net_id = None
self.use_ipv6 = ipv6_utils.is_enabled()
+ self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
+ self.driver,
+ self.plugin_rpc.process_prefix_update,
+ self.create_pd_router_update,
+ self.conf)
+
def _check_config_params(self):
"""Check items in configuration files.
for rp, update in self._queue.each_update_to_next_router():
LOG.debug("Starting router update for %s, action %s, priority %s",
update.id, update.action, update.priority)
+ if update.action == queue.PD_UPDATE:
+ self.pd.process_prefix_update()
+ continue
router = update.router
if update.action != queue.DELETE_ROUTER and not router:
try:
# When L3 agent is ready, we immediately do a full sync
self.periodic_sync_routers_task(self.context)
+ def create_pd_router_update(self):
+ router_id = None
+ update = queue.RouterUpdate(router_id,
+ queue.PRIORITY_PD_UPDATE,
+ timestamp=timeutils.utcnow(),
+ action=queue.PD_UPDATE)
+ self._queue.add(update)
+
class L3NATAgentWithStateReport(L3NATAgent):
# When L3 agent is ready, we immediately do a full sync
self.periodic_sync_routers_task(self.context)
+ self.pd.after_start()
+
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
"next-hop using a global unique address (GUA) is "
"desired, it needs to be done via a subnet allocated "
"to the network and not through this parameter. ")),
+ cfg.StrOpt('prefix_delegation_driver',
+ default='dibbler',
+ help=_('Driver used for ipv6 prefix delegation. This needs to '
+ 'be an entry point defined in the '
+ 'neutron.agent.linux.pd_drivers namespace. See '
+ 'setup.cfg for entry points included with the neutron '
+ 'source.')),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
cfg.BoolOpt('router_delete_namespaces', default=True,
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
+from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.i18n import _LW
if self.router_namespace:
self.router_namespace.delete()
+ def _internal_network_updated(self, port, subnet_id, prefix, old_prefix,
+ updated_cidrs):
+ interface_name = self.get_internal_device_name(port['id'])
+ if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX:
+ fixed_ips = port['fixed_ips']
+ for fixed_ip in fixed_ips:
+ if fixed_ip['subnet_id'] == subnet_id:
+ v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'],
+ fixed_ip.get('prefixlen'))
+ if v6addr not in updated_cidrs:
+ self.driver.add_ipv6_addr(interface_name, v6addr,
+ self.ns_name)
+ else:
+ self.driver.delete_ipv6_addr_with_prefix(interface_name,
+ old_prefix,
+ self.ns_name)
+
def _internal_network_added(self, ns_name, network_id, port_id,
fixed_ips, mac_address,
interface_name, prefix):
def _port_has_ipv6_subnet(port):
if 'subnets' in port:
for subnet in port['subnets']:
- if netaddr.IPNetwork(subnet['cidr']).version == 6:
+ if (netaddr.IPNetwork(subnet['cidr']).version == 6 and
+ subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX):
return True
def enable_radvd(self, internal_ports=None):
self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs,
namespace=self.ns_name)
- def _process_internal_ports(self):
+ def _process_internal_ports(self, pd):
existing_port_ids = set(p['id'] for p in self.internal_ports)
internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
LOG.debug("appending port %s to internal_ports cache", p)
self.internal_ports.append(p)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+ for subnet in p['subnets']:
+ if ipv6_utils.is_ipv6_pd_enabled(subnet):
+ interface_name = self.get_internal_device_name(p['id'])
+ pd.enable_subnet(self.router_id, subnet['id'],
+ subnet['cidr'],
+ interface_name, p['mac_address'])
for p in old_ports:
self.internal_network_removed(p)
LOG.debug("removing port %s from internal_ports cache", p)
self.internal_ports.remove(p)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+ for subnet in p['subnets']:
+ if ipv6_utils.is_ipv6_pd_enabled(subnet):
+ pd.disable_subnet(self.router_id, subnet['id'])
+ updated_cidrs = []
if updated_ports:
for index, p in enumerate(internal_ports):
if not updated_ports.get(p['id']):
interface_name = self.get_internal_device_name(p['id'])
ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
LOG.debug("updating internal network for port %s", p)
+ updated_cidrs += ip_cidrs
self.internal_network_updated(interface_name, ip_cidrs)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+ # Check if there is any pd prefix update
+ for p in internal_ports:
+ if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
+ for subnet in p.get('subnets', []):
+ if ipv6_utils.is_ipv6_pd_enabled(subnet):
+ old_prefix = pd.update_subnet(self.router_id,
+ subnet['id'],
+ subnet['cidr'])
+ if old_prefix:
+ self._internal_network_updated(p, subnet['id'],
+ subnet['cidr'],
+ old_prefix,
+ updated_cidrs)
+ enable_ra = True
+
# Enable RA
if enable_ra:
self.enable_radvd(internal_ports)
for stale_dev in stale_devs:
LOG.debug('Deleting stale internal router device: %s',
stale_dev)
+ pd.remove_stale_ri_ifname(self.router_id, stale_dev)
self.driver.unplug(stale_dev,
namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _gateway_ports_equal(port1, port2):
return port1 == port2
- def _process_external_gateway(self, ex_gw_port):
+ def _process_external_gateway(self, ex_gw_port, pd):
# TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
self.ex_gw_port and self.ex_gw_port['id'])
if ex_gw_port:
if not self.ex_gw_port:
self.external_gateway_added(ex_gw_port, interface_name)
+ pd.add_gw_interface(self.router['id'], interface_name)
elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
self.external_gateway_updated(ex_gw_port, interface_name)
elif not ex_gw_port and self.ex_gw_port:
self.external_gateway_removed(self.ex_gw_port, interface_name)
+ pd.remove_gw_interface(self.router['id'])
existing_devices = self._get_existing_devices()
stale_devs = [dev for dev in existing_devices
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug('Deleting stale external router device: %s', stale_dev)
+ pd.remove_gw_interface(self.router['id'])
self.driver.unplug(stale_dev,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
try:
with self.iptables_manager.defer_apply():
ex_gw_port = self.get_ex_gw_port()
- self._process_external_gateway(ex_gw_port)
+ self._process_external_gateway(ex_gw_port, agent.pd)
if not ex_gw_port:
return
:param agent: Passes the agent in order to send RPC messages.
"""
LOG.debug("process router updates")
- self._process_internal_ports()
+ self._process_internal_ports(agent.pd)
+ agent.pd.sync_router(self.router['id'])
self.process_external(agent)
# Process static routes for router
self.routes_updated()
# Lower value is higher priority
PRIORITY_RPC = 0
PRIORITY_SYNC_ROUTERS_TASK = 1
+PRIORITY_PD_UPDATE = 2
DELETE_ROUTER = 1
+PD_UPDATE = 2
class RouterUpdate(object):
--- /dev/null
+# Copyright 2015 Cisco Systems
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import jinja2
+import os
+from oslo_config import cfg
+import shutil
+import six
+
+from neutron.agent.linux import external_process
+from neutron.agent.linux import pd
+from neutron.agent.linux import pd_driver
+from neutron.agent.linux import utils
+from neutron.common import constants
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+PD_SERVICE_NAME = 'dibbler'
+CONFIG_TEMPLATE = jinja2.Template("""
+# Config for dibbler-client.
+
+# Use enterprise number based duid
+duid-type duid-en {{ enterprise_number }} {{ va_id }}
+
+# 8 (Debug) is most verbose. 7 (Info) is usually the best option
+log-level 8
+
+# No automatic downlink address assignment
+downlink-prefix-ifaces "none"
+
+# Use script to notify l3_agent of assigned prefix
+script {{ script_path }}
+
+# Ask for prefix over the external gateway interface
+iface {{ interface_name }} {
+# Bind to generated LLA
+bind-to-address {{ bind_address }}
+# ask for a delegated prefix
+ pd 1
+}
+""")
+
+# The first line must be #!/usr/bin/env bash
+SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash
+
+exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }}
+""")
+
+
+class PDDibbler(pd_driver.PDDriverBase):
+ def __init__(self, router_id, subnet_id, ri_ifname):
+ super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname)
+ self.requestor_id = "%s:%s:%s" % (self.router_id,
+ self.subnet_id,
+ self.ri_ifname)
+ self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs,
+ self.requestor_id)
+ self.prefix_path = "%s/prefix" % self.dibbler_client_working_area
+ self.pid_path = "%s/client.pid" % self.dibbler_client_working_area
+ self.converted_subnet_id = self.subnet_id.replace('-', '')
+
+ def _is_dibbler_client_running(self):
+ return utils.get_value_from_file(self.pid_path)
+
+ def _generate_dibbler_conf(self, ex_gw_ifname, lla):
+ dcwa = self.dibbler_client_working_area
+ script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True)
+ buf = six.StringIO()
+ buf.write('%s' % SCRIPT_TEMPLATE.render(
+ prefix_path=self.prefix_path,
+ l3_agent_pid=os.getpid()))
+ utils.replace_file(script_path, buf.getvalue())
+ os.chmod(script_path, 0o744)
+
+ dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False)
+ buf = six.StringIO()
+ buf.write('%s' % CONFIG_TEMPLATE.render(
+ enterprise_number=cfg.CONF.vendor_pen,
+ va_id='0x%s' % self.converted_subnet_id,
+ script_path='"%s/notify.sh"' % dcwa,
+ interface_name='"%s"' % ex_gw_ifname,
+ bind_address='%s' % lla))
+
+ utils.replace_file(dibbler_conf, buf.getvalue())
+ return dcwa
+
+ def _spawn_dibbler(self, pmon, router_ns, dibbler_conf):
+ def callback(pid_file):
+ dibbler_cmd = ['dibbler-client',
+ 'start',
+ '-w', '%s' % dibbler_conf]
+ return dibbler_cmd
+
+ pm = external_process.ProcessManager(
+ uuid=self.requestor_id,
+ default_cmd_callback=callback,
+ namespace=router_ns,
+ service=PD_SERVICE_NAME,
+ conf=cfg.CONF,
+ pid_file=self.pid_path)
+ pm.enable(reload_cfg=False)
+ pmon.register(uuid=self.requestor_id,
+ service_name=PD_SERVICE_NAME,
+ monitored_process=pm)
+
+ def enable(self, pmon, router_ns, ex_gw_ifname, lla):
+ LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s",
+ self.router_id, self.subnet_id, self.ri_ifname)
+ if not self._is_dibbler_client_running():
+ dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname, lla)
+ self._spawn_dibbler(pmon, router_ns, dibbler_conf)
+ LOG.debug("dibbler client enabled for router %s subnet %s"
+ " ri_ifname %s",
+ self.router_id, self.subnet_id, self.ri_ifname)
+
+ def disable(self, pmon, router_ns):
+ LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s",
+ self.router_id, self.subnet_id, self.ri_ifname)
+ dcwa = self.dibbler_client_working_area
+
+ def callback(pid_file):
+ dibbler_cmd = ['dibbler-client',
+ 'stop',
+ '-w', '%s' % dcwa]
+ return dibbler_cmd
+
+ pmon.unregister(uuid=self.requestor_id,
+ service_name=PD_SERVICE_NAME)
+ pm = external_process.ProcessManager(
+ uuid=self.requestor_id,
+ namespace=router_ns,
+ service=PD_SERVICE_NAME,
+ conf=cfg.CONF,
+ pid_file=self.pid_path)
+ pm.disable(get_stop_command=callback)
+ shutil.rmtree(dcwa, ignore_errors=True)
+ LOG.debug("dibbler client disabled for router %s subnet %s "
+ "ri_ifname %s",
+ self.router_id, self.subnet_id, self.ri_ifname)
+
+ def get_prefix(self):
+ prefix = utils.get_value_from_file(self.prefix_path)
+ if not prefix:
+ prefix = constants.PROVISIONAL_IPV6_PD_PREFIX
+ return prefix
+
+ @staticmethod
+ def get_sync_data():
+ try:
+ requestor_ids = os.listdir(cfg.CONF.pd_confs)
+ except OSError:
+ return []
+
+ sync_data = []
+ requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2)
+ for router_id, subnet_id, ri_ifname in requestors:
+ pd_info = pd.PDInfo()
+ pd_info.router_id = router_id
+ pd_info.subnet_id = subnet_id
+ pd_info.ri_ifname = ri_ifname
+ pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname)
+ pd_info.client_started = (
+ pd_info.driver._is_dibbler_client_running())
+ pd_info.prefix = pd_info.driver.get_prefix()
+ sync_data.append(pd_info)
+
+ return sync_data
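
To make the template above concrete, here is a sketch (not part of this change) of rendering CONFIG_TEMPLATE with made-up values; the interface name, working-area path, subnet id and link local address are invented for illustration.

# Illustrative only: every value below is hypothetical.
from neutron.agent.linux.dibbler import CONFIG_TEMPLATE

print(CONFIG_TEMPLATE.render(
    enterprise_number=8888,                # cfg.CONF.vendor_pen default
    va_id='0x' + 32 * 'a',                 # subnet UUID with '-' stripped
    script_path='"/opt/stack/data/neutron/pd/<requestor-id>/notify.sh"',
    interface_name='"qg-1234abcd-56"',
    bind_address='fe80::f816:3eff:fe01:2345'))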
def reload_cfg(self):
self.disable('HUP')
- def disable(self, sig='9'):
+ def disable(self, sig='9', get_stop_command=None):
pid = self.pid
if self.active:
- cmd = ['kill', '-%s' % (sig), pid]
- utils.execute(cmd, run_as_root=True)
- # In the case of shutting down, remove the pid file
- if sig == '9':
- fileutils.delete_if_exists(self.get_pid_file_name())
+ if get_stop_command:
+ cmd = get_stop_command(self.get_pid_file_name())
+ ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
+ ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
+ else:
+ cmd = ['kill', '-%s' % (sig), pid]
+ utils.execute(cmd, run_as_root=True)
+ # In the case of shutting down, remove the pid file
+ if sig == '9':
+ fileutils.delete_if_exists(self.get_pid_file_name())
elif pid:
LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)
+ def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
+ device = ip_lib.IPDevice(device_name,
+ namespace=namespace)
+ net = netaddr.IPNetwork(v6addr)
+ device.addr.add(str(net), scope)
+
+ def delete_ipv6_addr(self, device_name, v6addr, namespace):
+ device = ip_lib.IPDevice(device_name,
+ namespace=namespace)
+ device.delete_addr_and_conntrack_state(v6addr)
+
+ def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
+ """Delete the first listed IPv6 address that falls within a given
+ prefix.
+ """
+ device = ip_lib.IPDevice(device_name, namespace=namespace)
+ net = netaddr.IPNetwork(prefix)
+ for address in device.addr.list(scope='global', filters=['permanent']):
+ ip_address = netaddr.IPNetwork(address['cidr'])
+ if ip_address in net:
+ device.delete_addr_and_conntrack_state(address['cidr'])
+ break
+
+ def get_ipv6_llas(self, device_name, namespace):
+ device = ip_lib.IPDevice(device_name,
+ namespace=namespace)
+
+ return device.addr.list(scope='link', ip_version=6)
+
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
--- /dev/null
+# Copyright 2015 Cisco Systems
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+import functools
+import signal
+import six
+
+from stevedore import driver
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron.agent.linux import utils as linux_utils
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
+from neutron.common import constants as l3_constants
+from neutron.common import ipv6_utils
+from neutron.common import utils
+
+LOG = logging.getLogger(__name__)
+
+OPTS = [
+ cfg.StrOpt('pd_dhcp_driver',
+ default='dibbler',
+ help=_('Service to handle DHCPv6 Prefix delegation.')),
+]
+
+cfg.CONF.register_opts(OPTS)
+
+
+class PrefixDelegation(object):
+ def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb,
+ agent_conf):
+ self.context = context
+ self.pmon = pmon
+ self.intf_driver = intf_driver
+ self.notifier = notifier
+ self.routers = {}
+ self.pd_update_cb = pd_update_cb
+ self.agent_conf = agent_conf
+ self.pd_dhcp_driver = driver.DriverManager(
+ namespace='neutron.agent.linux.pd_drivers',
+ name=agent_conf.prefix_delegation_driver,
+ ).driver
+ registry.subscribe(add_router,
+ resources.ROUTER,
+ events.BEFORE_CREATE)
+ registry.subscribe(remove_router,
+ resources.ROUTER,
+ events.AFTER_DELETE)
+ self._get_sync_data()
+
+ @utils.synchronized("l3-agent-pd")
+ def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
+ router = self.routers.get(router_id)
+ if router is None:
+ return
+
+ pd_info = router['subnets'].get(subnet_id)
+ if not pd_info:
+ pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac)
+ router['subnets'][subnet_id] = pd_info
+
+ pd_info.bind_lla = self._get_lla(mac)
+ if pd_info.sync:
+ pd_info.mac = mac
+ pd_info.old_prefix = prefix
+ else:
+ self._add_lla(router, pd_info.get_bind_lla_with_mask())
+
+ def _delete_pd(self, router, pd_info):
+ self._delete_lla(router, pd_info.get_bind_lla_with_mask())
+ if pd_info.client_started:
+ pd_info.driver.disable(self.pmon, router['ns_name'])
+
+ @utils.synchronized("l3-agent-pd")
+ def disable_subnet(self, router_id, subnet_id):
+ prefix_update = {}
+ router = self.routers.get(router_id)
+ if not router:
+ return
+ pd_info = router['subnets'].get(subnet_id)
+ if not pd_info:
+ return
+ self._delete_pd(router, pd_info)
+ prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
+ del router['subnets'][subnet_id]
+ LOG.debug("Update server with prefixes: %s", prefix_update)
+ self.notifier(self.context, prefix_update)
+
+ @utils.synchronized("l3-agent-pd")
+ def update_subnet(self, router_id, subnet_id, prefix):
+ router = self.routers.get(router_id)
+ if router is not None:
+ pd_info = router['subnets'].get(subnet_id)
+ if pd_info and pd_info.old_prefix != prefix:
+ old_prefix = pd_info.old_prefix
+ pd_info.old_prefix = prefix
+ return old_prefix
+
+ @utils.synchronized("l3-agent-pd")
+ def add_gw_interface(self, router_id, gw_ifname):
+ router = self.routers.get(router_id)
+ prefix_update = {}
+ if not router:
+ return
+ router['gw_interface'] = gw_ifname
+ for subnet_id, pd_info in six.iteritems(router['subnets']):
+            # The gateway is added after the internal router ports.
+            # If a PD request is being synced and the prefix is available,
+            # send an update if the prefix is out of sync; if it is not
+            # available, start the PD client.
+ bind_lla_with_mask = pd_info.get_bind_lla_with_mask()
+ if pd_info.sync:
+ pd_info.sync = False
+ if pd_info.client_started:
+ if pd_info.prefix != pd_info.old_prefix:
+                        prefix_update[subnet_id] = pd_info.prefix
+ else:
+ self._delete_lla(router, bind_lla_with_mask)
+ self._add_lla(router, bind_lla_with_mask)
+ else:
+ self._add_lla(router, bind_lla_with_mask)
+ if prefix_update:
+ LOG.debug("Update server with prefixes: %s", prefix_update)
+ self.notifier(self.context, prefix_update)
+
+ def delete_router_pd(self, router):
+ prefix_update = {}
+ for subnet_id, pd_info in six.iteritems(router['subnets']):
+ self._delete_lla(router, pd_info.get_bind_lla_with_mask())
+ if pd_info.client_started:
+ pd_info.driver.disable(self.pmon, router['ns_name'])
+ pd_info.prefix = None
+ pd_info.client_started = False
+ prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
+ prefix_update[subnet_id] = prefix
+ if prefix_update:
+ LOG.debug("Update server with prefixes: %s", prefix_update)
+ self.notifier(self.context, prefix_update)
+
+ @utils.synchronized("l3-agent-pd")
+ def remove_gw_interface(self, router_id):
+ router = self.routers.get(router_id)
+ if router is not None:
+ router['gw_interface'] = None
+ self.delete_router_pd(router)
+
+ @utils.synchronized("l3-agent-pd")
+ def sync_router(self, router_id):
+ router = self.routers.get(router_id)
+ if router is not None and router['gw_interface'] is None:
+ self.delete_router_pd(router)
+
+ @utils.synchronized("l3-agent-pd")
+ def remove_stale_ri_ifname(self, router_id, stale_ifname):
+ router = self.routers.get(router_id)
+ if router is not None:
+            for subnet_id, pd_info in list(router['subnets'].items()):
+ if pd_info.ri_ifname == stale_ifname:
+ self._delete_pd(router, pd_info)
+ del router['subnets'][subnet_id]
+
+ @staticmethod
+ def _get_lla(mac):
+ lla = ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX,
+ mac)
+ return lla
+
+ def _get_llas(self, gw_ifname, ns_name):
+ try:
+ return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name)
+ except RuntimeError:
+ # The error message was printed as part of the driver call
+ # This could happen if the gw_ifname was removed
+ # simply return and exit the thread
+ return
+
+ def _add_lla(self, router, lla_with_mask):
+ if router['gw_interface']:
+ self.intf_driver.add_ipv6_addr(router['gw_interface'],
+ lla_with_mask,
+ router['ns_name'],
+ 'link')
+            # There is a delay before the LLA becomes active.
+            # This is because the kernel runs DAD to ensure the LLA is unique.
+            # Spawn a thread to wait for the interface to be ready.
+ self._spawn_lla_thread(router['gw_interface'],
+ router['ns_name'],
+ lla_with_mask)
+
+ def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask):
+ eventlet.spawn_n(self._ensure_lla_task,
+ gw_ifname,
+ ns_name,
+ lla_with_mask)
+
+ def _delete_lla(self, router, lla_with_mask):
+ if lla_with_mask and router['gw_interface']:
+ try:
+ self.intf_driver.delete_ipv6_addr(router['gw_interface'],
+ lla_with_mask,
+ router['ns_name'])
+ except RuntimeError:
+ # Ignore error if the lla doesn't exist
+ pass
+
+ def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask):
+        # It should not take this long unless the DAD test failed; in that
+        # case, the subnet will never be assigned a prefix.
+ linux_utils.wait_until_true(functools.partial(self._lla_available,
+ gw_ifname,
+ ns_name,
+ lla_with_mask),
+ timeout=l3_constants.LLA_TASK_TIMEOUT,
+ sleep=2)
+
+ def _lla_available(self, gw_ifname, ns_name, lla_with_mask):
+ llas = self._get_llas(gw_ifname, ns_name)
+ if self._is_lla_active(lla_with_mask, llas):
+            LOG.debug("LLA %s is active now", lla_with_mask)
+ self.pd_update_cb()
+ return True
+
+ @staticmethod
+ def _is_lla_active(lla_with_mask, llas):
+ for lla in llas:
+ if lla_with_mask == lla['cidr']:
+ return not lla['tentative']
+ return False
+
+ @utils.synchronized("l3-agent-pd")
+ def process_prefix_update(self):
+ LOG.debug("Processing IPv6 PD Prefix Update")
+
+ prefix_update = {}
+ for router_id, router in six.iteritems(self.routers):
+ if not router['gw_interface']:
+ continue
+
+ llas = None
+ for subnet_id, pd_info in six.iteritems(router['subnets']):
+ if pd_info.client_started:
+ prefix = pd_info.driver.get_prefix()
+ if prefix != pd_info.prefix:
+ pd_info.prefix = prefix
+ prefix_update[subnet_id] = prefix
+ else:
+ if not llas:
+ llas = self._get_llas(router['gw_interface'],
+ router['ns_name'])
+
+ if self._is_lla_active(pd_info.get_bind_lla_with_mask(),
+ llas):
+ if not pd_info.driver:
+ pd_info.driver = self.pd_dhcp_driver(
+ router_id, subnet_id, pd_info.ri_ifname)
+ pd_info.driver.enable(self.pmon, router['ns_name'],
+ router['gw_interface'],
+ pd_info.bind_lla)
+ pd_info.client_started = True
+
+ if prefix_update:
+ LOG.debug("Update server with prefixes: %s", prefix_update)
+ self.notifier(self.context, prefix_update)
+
+ def after_start(self):
+ LOG.debug('SIGHUP signal handler set')
+ signal.signal(signal.SIGHUP, self._handle_sighup)
+
+ def _handle_sighup(self, signum, frame):
+ # The external DHCPv6 client uses SIGHUP to notify agent
+ # of prefix changes.
+ self.pd_update_cb()
+
+ def _get_sync_data(self):
+ sync_data = self.pd_dhcp_driver.get_sync_data()
+ for pd_info in sync_data:
+ router_id = pd_info.router_id
+ if not self.routers.get(router_id):
+ self.routers[router_id] = {'gw_interface': None,
+ 'ns_name': None,
+ 'subnets': {}}
+ new_pd_info = PDInfo(pd_info=pd_info)
+ subnets = self.routers[router_id]['subnets']
+ subnets[pd_info.subnet_id] = new_pd_info
+
+
+@utils.synchronized("l3-agent-pd")
+def remove_router(resource, event, l3_agent, **kwargs):
+    router_id = kwargs['router'].router_id
+    router = l3_agent.pd.routers.get(router_id)
+    l3_agent.pd.delete_router_pd(router)
+    del l3_agent.pd.routers[router_id]['subnets']
+    del l3_agent.pd.routers[router_id]
+
+
+@utils.synchronized("l3-agent-pd")
+def add_router(resource, event, l3_agent, **kwargs):
+ added_router = kwargs['router']
+ router = l3_agent.pd.routers.get(added_router.router_id)
+ if not router:
+ l3_agent.pd.routers[added_router.router_id] = {
+ 'gw_interface': None,
+ 'ns_name': added_router.ns_name,
+ 'subnets': {}}
+ else:
+ # This will happen during l3 agent restart
+ router['ns_name'] = added_router.ns_name
+
+
+class PDInfo(object):
+ """A class to simplify storing and passing of information relevant to
+ Prefix Delegation operations for a given subnet.
+ """
+ def __init__(self, pd_info=None, ri_ifname=None, mac=None):
+ if pd_info is None:
+ self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
+ self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
+ self.ri_ifname = ri_ifname
+ self.mac = mac
+ self.bind_lla = None
+ self.sync = False
+ self.driver = None
+ self.client_started = False
+ else:
+ self.prefix = pd_info.prefix
+ self.old_prefix = None
+ self.ri_ifname = pd_info.ri_ifname
+ self.mac = None
+ self.bind_lla = None
+ self.sync = True
+ self.driver = pd_info.driver
+ self.client_started = pd_info.client_started
+
+ def get_bind_lla_with_mask(self):
+ bind_lla_with_mask = '%s/64' % self.bind_lla
+ return bind_lla_with_mask
--- /dev/null
+# Copyright 2015 Cisco Systems
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import six
+
+from oslo_config import cfg
+
+OPTS = [
+ cfg.StrOpt('pd_confs',
+ default='$state_path/pd',
+ help=_('Location to store IPv6 PD files.')),
+ cfg.StrOpt('vendor_pen',
+ default='8888',
+ help=_("A decimal value as Vendor's Registered Private "
+ "Enterprise Number as required by RFC3315 DUID-EN.")),
+]
+
+cfg.CONF.register_opts(OPTS)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class PDDriverBase(object):
+
+ def __init__(self, router_id, subnet_id, ri_ifname):
+ self.router_id = router_id
+ self.subnet_id = subnet_id
+ self.ri_ifname = ri_ifname
+
+ @abc.abstractmethod
+ def enable(self, pmon, router_ns, ex_gw_ifname, lla):
+ """Enable IPv6 Prefix Delegation for this PDDriver on the given
+ external interface, with the given link local address
+ """
+
+ @abc.abstractmethod
+ def disable(self, pmon, router_ns):
+ """Disable IPv6 Prefix Delegation for this PDDriver
+ """
+
+ @abc.abstractmethod
+ def get_prefix(self):
+        """Get the currently assigned prefix for this PDDriver from the PD agent.
+ If no prefix is currently assigned, return
+ constants.PROVISIONAL_IPV6_PD_PREFIX
+ """
+
+ @staticmethod
+ @abc.abstractmethod
+ def get_sync_data():
+ """Get the latest router_id, subnet_id, and ri_ifname from the PD agent
+ so that the PDDriver can be kept up to date
+ """
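
The abstract base above defines the contract a pluggable PD driver has to honor: start and stop a PD client per subnet, report the prefix it was delegated, and hand back enough state to resync after an agent restart. The sketch below is not part of this change; the class name StubPDDriver and its no-op behaviour are assumptions for illustration. A real out-of-tree driver would additionally be registered under the neutron.agent.linux.pd_drivers entry point namespace, as the dibbler driver is in setup.cfg further down.

# Illustrative sketch only -- not part of this change.
from neutron.agent.linux import pd_driver
from neutron.common import constants


class StubPDDriver(pd_driver.PDDriverBase):
    """Hypothetical driver that never starts a real DHCPv6 PD client."""

    def enable(self, pmon, router_ns, ex_gw_ifname, lla):
        # A real driver would spawn its client inside router_ns here,
        # bound to the link local address 'lla' on ex_gw_ifname, and
        # register the process with the monitor 'pmon'.
        pass

    def disable(self, pmon, router_ns):
        # A real driver would stop its client and clean up its state.
        pass

    def get_prefix(self):
        # Nothing is ever delegated, so report the provisional prefix.
        return constants.PROVISIONAL_IPV6_PD_PREFIX

    @staticmethod
    def get_sync_data():
        # No state survives an agent restart in this sketch.
        return []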
--- /dev/null
+# Copyright (c) 2015 Cisco Systems.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import signal
+import sys
+
+from neutron.common import utils
+
+
+def main():
+ """Expected arguments:
+ sys.argv[1] - The add/update/delete operation performed by the PD agent
+ sys.argv[2] - The file where the new prefix should be written
+ sys.argv[3] - The process ID of the L3 agent to be notified of this change
+ """
+ operation = sys.argv[1]
+ prefix_fname = sys.argv[2]
+ agent_pid = sys.argv[3]
+ prefix = os.getenv('PREFIX1', "::")
+
+ if operation == "add" or operation == "update":
+ utils.replace_file(prefix_fname, "%s/64" % prefix)
+ elif operation == "delete":
+ utils.replace_file(prefix_fname, "::/64")
+ os.kill(int(agent_pid), signal.SIGHUP)
MINIMUM_DNSMASQ_VERSION = 2.67
+MINIMUM_DIBBLER_VERSION = '1.0.1'
def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'):
LOG.debug("Exception while checking for installed ebtables. "
"Exception: %s", e)
return False
+
+
+def get_minimal_dibbler_version_supported():
+ return MINIMUM_DIBBLER_VERSION
+
+
+def dibbler_version_supported():
+ try:
+ cmd = ['dibbler-client',
+ 'help']
+ out = agent_utils.execute(cmd)
+ return '-w' in out
+ except (OSError, RuntimeError, IndexError, ValueError) as e:
+ LOG.debug("Exception while checking minimal dibbler version. "
+ "Exception: %s", e)
+ return False
return result
+def check_dibbler_version():
+ result = checks.dibbler_version_supported()
+ if not result:
+ LOG.error(_LE('The installed version of dibbler-client is too old. '
+ 'Please update to at least version %s.'),
+ checks.get_minimal_dibbler_version_supported())
+ return result
+
+
def check_nova_notify():
result = checks.nova_notify_supported()
if not result:
help=_('Check ebtables installation')),
BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support,
help=_('Check keepalived IPv6 support')),
+ BoolOptCallback('dibbler_version', check_dibbler_version,
+ help=_('Check minimal dibbler version')),
]
# Special provisional prefix for IPv6 Prefix Delegation
PROVISIONAL_IPV6_PD_PREFIX = '::/64'
+# Timeout in seconds for getting an IPv6 LLA
+LLA_TASK_TIMEOUT = 40
+
# Linux interface max length
DEVICE_NAME_MAX_LEN = 15
import random
import signal
import socket
+import tempfile
import uuid
from eventlet.green import subprocess
# versions (2.x vs. 3.x)
return int(decimal.Decimal(val).quantize(decimal.Decimal('1'),
rounding=decimal.ROUND_HALF_UP))
+
+
+def replace_file(file_name, data):
+ """Replaces the contents of file_name with data in a safe manner.
+
+ First write to a temp file and then rename. Since POSIX renames are
+ atomic, the file is unlikely to be corrupted by competing writes.
+
+ We create the tempfile on the same device to ensure that it can be renamed.
+ """
+
+ base_dir = os.path.dirname(os.path.abspath(file_name))
+ with tempfile.NamedTemporaryFile('w+',
+ dir=base_dir,
+ delete=False) as tmp_file:
+ tmp_file.write(data)
+ os.chmod(tmp_file.name, 0o644)
+ os.rename(tmp_file.name, file_name)
router[l3_constants.INTERFACE_KEY] = interfaces
+def router_append_pd_enabled_subnet(router, count=1):
+ interfaces = router[l3_constants.INTERFACE_KEY]
+ current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6
+ for p in interfaces for subnet in p['subnets'])
+
+ mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
+ mac_address.dialect = netaddr.mac_unix
+ pd_intfs = []
+ for i in range(current, current + count):
+ subnet_id = _uuid()
+ intf = {'id': _uuid(),
+ 'network_id': _uuid(),
+ 'admin_state_up': True,
+ 'fixed_ips': [{'ip_address': '::1',
+ 'prefixlen': 64,
+ 'subnet_id': subnet_id}],
+ 'mac_address': str(mac_address),
+ 'subnets': [{'id': subnet_id,
+ 'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX,
+ 'gateway_ip': '::1',
+ 'ipv6_ra_mode': l3_constants.IPV6_SLAAC,
+ 'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]}
+ interfaces.append(intf)
+ pd_intfs.append(intf)
+ mac_address.value += 1
+ return pd_intfs
+
+
def prepare_ext_gw_test(context, ri, dual_stack=False):
subnet_id = _uuid()
fixed_ips = [{'subnet_id': subnet_id,
def test_dnsmasq_version(self):
checks.dnsmasq_version_supported()
+ def test_dibbler_version(self):
+ checks.dibbler_version_supported()
+
class SanityTestCaseRoot(functional_base.BaseSudoTestCase):
"""Sanity checks that require root access.
from oslo_log import log
import oslo_messaging
from oslo_utils import uuidutils
+import six
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3router
+from neutron.agent.linux import dibbler
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
+from neutron.agent.linux import pd
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
self.assertFalse(nat_rules_delta)
return ri
- def _expected_call_lookup_ri_process(self, ri, process):
- """Expected call if a process is looked up in a router instance."""
- return [mock.call(uuid=ri.router['id'],
- service=process,
+ def _radvd_expected_call_external_process(self, ri, enable=True):
+ expected_calls = [mock.call(uuid=ri.router['id'],
+ service='radvd',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
run_as_root=True)]
+ if enable:
+ expected_calls.append(mock.call().enable(reload_cfg=True))
+ else:
+ expected_calls.append(mock.call().disable())
+ return expected_calls
def _process_router_ipv6_subnet_added(
self, router, ipv6_subnet_modes=None):
self._process_router_instance_for_agent(agent, ri, router)
return ri
- def _assert_ri_process_enabled(self, ri, process):
+ def _assert_ri_process_enabled(self, ri):
"""Verify that process was enabled for a router instance."""
- expected_calls = self._expected_call_lookup_ri_process(
- ri, process)
- expected_calls.append(mock.call().enable(reload_cfg=True))
+ expected_calls = self._radvd_expected_call_external_process(ri)
self.assertEqual(expected_calls, self.external_process.mock_calls)
- def _assert_ri_process_disabled(self, ri, process):
+ def _assert_ri_process_disabled(self, ri):
"""Verify that process was disabled for a router instance."""
- expected_calls = self._expected_call_lookup_ri_process(
- ri, process)
- expected_calls.append(mock.call().disable())
+ expected_calls = self._radvd_expected_call_external_process(ri, False)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def test_process_router_ipv6_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(router)
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
# Expect radvd configured without prefix
self.assertNotIn('prefix',
self.utils_replace_file.call_args[0][1].split())
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=l3_constants.IPV6_SLAAC)
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
self.assertIn('prefix',
self.utils_replace_file.call_args[0][1].split())
'address_mode': l3_constants.DHCPV6_STATELESS},
{'ra_mode': l3_constants.DHCPV6_STATEFUL,
'address_mode': l3_constants.DHCPV6_STATEFUL}])
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
# Assert we have a prefix from IPV6_SLAAC and a prefix from
# DHCPV6_STATELESS on one interface
{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}])
self._process_router_instance_for_agent(agent, ri, router)
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self._process_router_instance_for_agent(agent, ri, router)
# radvd should have been enabled again and the interface
# should have two prefixes
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(2, len(ri.internal_ports[1]['subnets']))
self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips']))
l3_test_common.router_append_interface(router, count=1, ip_version=6)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=6)
self._process_router_instance_for_agent(agent, ri, router)
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
# Reset the calls so we can check for disable radvd
self.external_process.reset_mock()
self.process_monitor.reset_mock()
# Remove the IPv6 interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
self._process_router_instance_for_agent(agent, ri, router)
- self._assert_ri_process_disabled(ri, 'radvd')
+ self._assert_ri_process_disabled(ri)
def test_process_router_ipv6_subnet_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
'address_mode': l3_constants.IPV6_SLAAC}]
* 2))
self._process_router_instance_for_agent(agent, ri, router)
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
# Reset mocks to check for modified radvd config
self.utils_replace_file.reset_mock()
self.external_process.reset_mock()
self._process_router_instance_for_agent(agent, ri, router)
# Assert radvd was enabled again and that we only have one
# prefix on the interface
- self._assert_ri_process_enabled(ri, 'radvd')
+ self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.utils_replace_file.call_args[0][1])
assertFlag(managed_flag)('AdvManagedFlag on;',
self.utils_replace_file.call_args[0][1])
+
+ def _pd_expected_call_external_process(self, requestor, ri, enable=True):
+ expected_calls = []
+ if enable:
+ expected_calls.append(mock.call(uuid=requestor,
+ service='dibbler',
+ default_cmd_callback=mock.ANY,
+ namespace=ri.ns_name,
+ conf=mock.ANY,
+ pid_file=mock.ANY))
+ expected_calls.append(mock.call().enable(reload_cfg=False))
+ else:
+ expected_calls.append(mock.call(uuid=requestor,
+ service='dibbler',
+ namespace=ri.ns_name,
+ conf=mock.ANY,
+ pid_file=mock.ANY))
+ expected_calls.append(mock.call().disable(
+ get_stop_command=mock.ANY))
+ return expected_calls
+
+ def _pd_setup_agent_router(self):
+ router = l3_test_common.prepare_router_data()
+ ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
+ agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
+ agent.external_gateway_added = mock.Mock()
+ ri.process(agent)
+ agent._router_added(router['id'], router)
+ # Make sure radvd monitor is created
+ if not ri.radvd:
+ ri.radvd = ra.DaemonMonitor(router['id'],
+ ri.ns_name,
+ agent.process_monitor,
+ ri.get_internal_device_name)
+ return agent, router, ri
+
+ def _pd_remove_gw_interface(self, intfs, agent, router, ri):
+ expected_pd_update = {}
+ expected_calls = []
+ for intf in intfs:
+ requestor_id = self._pd_get_requestor_id(intf, router, ri)
+ expected_calls += (self._pd_expected_call_external_process(
+ requestor_id, ri, False))
+ for subnet in intf['subnets']:
+ expected_pd_update[subnet['id']] = (
+ l3_constants.PROVISIONAL_IPV6_PD_PREFIX)
+
+ # Implement the prefix update notifier
+ # Keep track of the updated prefix
+ self.pd_update = {}
+
+ def pd_notifier(context, prefix_update):
+ self.pd_update = prefix_update
+ for subnet_id, prefix in six.iteritems(prefix_update):
+ for intf in intfs:
+ for subnet in intf['subnets']:
+ if subnet['id'] == subnet_id:
+ # Update the prefix
+ subnet['cidr'] = prefix
+ break
+
+ # Remove the gateway interface
+ agent.pd.notifier = pd_notifier
+ agent.pd.remove_gw_interface(router['id'])
+
+ self._pd_assert_dibbler_calls(expected_calls,
+ self.external_process.mock_calls[-len(expected_calls):])
+ self.assertEqual(expected_pd_update, self.pd_update)
+
+ def _pd_remove_interfaces(self, intfs, agent, router, ri):
+ expected_pd_update = []
+ expected_calls = []
+ for intf in intfs:
+ # Remove the router interface
+ router[l3_constants.INTERFACE_KEY].remove(intf)
+ requestor_id = self._pd_get_requestor_id(intf, router, ri)
+ expected_calls += (self._pd_expected_call_external_process(
+ requestor_id, ri, False))
+ for subnet in intf['subnets']:
+ expected_pd_update += [{subnet['id']:
+ l3_constants.PROVISIONAL_IPV6_PD_PREFIX}]
+
+ # Implement the prefix update notifier
+ # Keep track of the updated prefix
+ self.pd_update = []
+
+ def pd_notifier(context, prefix_update):
+ self.pd_update.append(prefix_update)
+ for intf in intfs:
+ for subnet in intf['subnets']:
+                    if subnet['id'] in prefix_update:
+                        # Update the prefix
+                        subnet['cidr'] = prefix_update[subnet['id']]
+
+ # Process the router for removed interfaces
+ agent.pd.notifier = pd_notifier
+ ri.process(agent)
+
+ # The number of external process calls takes radvd into account.
+ # This is because there is no ipv6 interface any more after removing
+ # the interfaces, and radvd will be killed because of that
+ self._pd_assert_dibbler_calls(expected_calls,
+ self.external_process.mock_calls[-len(expected_calls) - 2:])
+ self._pd_assert_radvd_calls(ri, False)
+ self.assertEqual(expected_pd_update, self.pd_update)
+
+ def _pd_get_requestor_id(self, intf, router, ri):
+ ifname = ri.get_internal_device_name(intf['id'])
+ for subnet in intf['subnets']:
+ return dibbler.PDDibbler(router['id'],
+ subnet['id'], ifname).requestor_id
+
+ def _pd_assert_dibbler_calls(self, expected, actual):
+        '''Check that the external process calls for dibbler are as expected
+
+        In the case of multiple pd-enabled router ports, the exact sequence
+        of these calls is not deterministic. It's known, though, that each
+        external_process call is followed by either an enable() or disable()
+        '''
+
+        num_ext_calls = len(expected) // 2
+ expected_ext_calls = []
+ actual_ext_calls = []
+ expected_action_calls = []
+ actual_action_calls = []
+ for c in range(num_ext_calls):
+ expected_ext_calls.append(expected[c * 2])
+ actual_ext_calls.append(actual[c * 2])
+ expected_action_calls.append(expected[c * 2 + 1])
+ actual_action_calls.append(actual[c * 2 + 1])
+
+ self.assertEqual(expected_action_calls, actual_action_calls)
+ for exp in expected_ext_calls:
+ for act in actual_ext_calls:
+ if exp == act:
+ break
+ else:
+ msg = "Unexpected dibbler external process call."
+ self.fail(msg)
+
+ def _pd_assert_radvd_calls(self, ri, enable=True):
+ exp_calls = self._radvd_expected_call_external_process(ri, enable)
+ self.assertEqual(exp_calls,
+ self.external_process.mock_calls[-len(exp_calls):])
+
+ def _pd_get_prefixes(self, agent, router, ri,
+ existing_intfs, new_intfs, mock_get_prefix):
+ # First generate the prefixes that will be used for each interface
+ prefixes = {}
+ expected_pd_update = {}
+ expected_calls = []
+ for ifno, intf in enumerate(existing_intfs + new_intfs):
+ requestor_id = self._pd_get_requestor_id(intf, router, ri)
+ prefixes[requestor_id] = "2001:cafe:cafe:%d::/64" % ifno
+ if intf in new_intfs:
+ subnet_id = (intf['subnets'][0]['id'] if intf['subnets']
+ else None)
+ expected_pd_update[subnet_id] = prefixes[requestor_id]
+ expected_calls += (
+ self._pd_expected_call_external_process(requestor_id, ri))
+
+ # Implement the prefix update notifier
+ # Keep track of the updated prefix
+ self.pd_update = {}
+
+ def pd_notifier(context, prefix_update):
+ self.pd_update = prefix_update
+ for subnet_id, prefix in six.iteritems(prefix_update):
+ for intf in new_intfs:
+ for subnet in intf['subnets']:
+ if subnet['id'] == subnet_id:
+ # Update the prefix
+ subnet['cidr'] = prefix
+ break
+
+ # Start the dibbler client
+ agent.pd.notifier = pd_notifier
+ agent.pd.process_prefix_update()
+
+ # Get the prefix and check that the neutron server is notified
+ def get_prefix(pdo):
+ key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname)
+ return prefixes[key]
+ mock_get_prefix.side_effect = get_prefix
+ agent.pd.process_prefix_update()
+
+ # Make sure that the updated prefixes are expected
+ self._pd_assert_dibbler_calls(expected_calls,
+ self.external_process.mock_calls[-len(expected_calls):])
+ self.assertEqual(expected_pd_update, self.pd_update)
+
+ def _pd_add_gw_interface(self, agent, router, ri):
+ gw_ifname = ri.get_external_device_name(router['gw_port']['id'])
+ agent.pd.add_gw_interface(router['id'], gw_ifname)
+
+ @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
+ @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
+ @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
+ return_value=True)
+ @mock.patch.object(dibbler.os, 'chmod')
+ @mock.patch.object(dibbler.shutil, 'rmtree')
+ @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
+ def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4,
+ mock_getpid, mock_get_prefix):
+ '''Add and remove one pd-enabled subnet
+ Remove the interface by deleting it from the router
+ '''
+ # Initial setup
+ agent, router, ri = self._pd_setup_agent_router()
+
+ # Create one pd-enabled subnet and add router interface
+ intfs = l3_test_common.router_append_pd_enabled_subnet(router)
+ ri.process(agent)
+
+ # No client should be started since there is no gateway port
+ self.assertFalse(self.external_process.call_count)
+ self.assertFalse(mock_get_prefix.call_count)
+
+ # Add the gateway interface
+ self._pd_add_gw_interface(agent, router, ri)
+
+ # Get one prefix
+ self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
+
+ # Update the router with the new prefix
+ ri.process(agent)
+
+ # Check that radvd is started and the router port is configured
+ # with the new prefix
+ self._pd_assert_radvd_calls(ri)
+
+ # Now remove the interface
+ self._pd_remove_interfaces(intfs, agent, router, ri)
+
+ @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
+ @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
+ @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
+ return_value=True)
+ @mock.patch.object(dibbler.os, 'chmod')
+ @mock.patch.object(dibbler.shutil, 'rmtree')
+ @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
+ def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4,
+ mock_getpid, mock_get_prefix):
+ '''Add one pd-enabled subnet and remove the gateway port
+ Remove the gateway port and check the prefix is removed
+ '''
+ # Initial setup
+ agent, router, ri = self._pd_setup_agent_router()
+
+ # Create one pd-enabled subnet and add router interface
+ intfs = l3_test_common.router_append_pd_enabled_subnet(router)
+ ri.process(agent)
+
+ # Add the gateway interface
+ self._pd_add_gw_interface(agent, router, ri)
+
+ # Get one prefix
+ self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
+
+ # Update the router with the new prefix
+ ri.process(agent)
+
+ # Check that radvd is started
+ self._pd_assert_radvd_calls(ri)
+
+ # Now remove the gw interface
+ self._pd_remove_gw_interface(intfs, agent, router, ri)
+
+ # There will be a router update
+ ri.process(agent)
+
+ @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
+ @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
+ @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
+ return_value=True)
+ @mock.patch.object(dibbler.os, 'chmod')
+ @mock.patch.object(dibbler.shutil, 'rmtree')
+ @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
+ def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4,
+ mock_getpid, mock_get_prefix):
+ '''Add and remove two pd-enabled subnets
+ Remove the interfaces by deleting them from the router
+ '''
+ # Initial setup
+ agent, router, ri = self._pd_setup_agent_router()
+
+ # Create 2 pd-enabled subnets and add router interfaces
+ intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=2)
+ ri.process(agent)
+
+ # No client should be started
+ self.assertFalse(self.external_process.call_count)
+ self.assertFalse(mock_get_prefix.call_count)
+
+ # Add the gateway interface
+ self._pd_add_gw_interface(agent, router, ri)
+
+ # Get prefixes
+ self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
+
+ # Update the router with the new prefix
+ ri.process(agent)
+
+ # Check that radvd is started and the router port is configured
+ # with the new prefix
+ self._pd_assert_radvd_calls(ri)
+
+ # Now remove the interface
+ self._pd_remove_interfaces(intfs, agent, router, ri)
+
+ @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
+ @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
+ @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
+ return_value=True)
+ @mock.patch.object(dibbler.os, 'chmod')
+ @mock.patch.object(dibbler.shutil, 'rmtree')
+ @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
+ def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4,
+ mock_getpid, mock_get_prefix):
+ '''Add one pd-enabled subnet, followed by adding another one
+ Remove the gateway port and check the prefix is removed
+ '''
+ # Initial setup
+ agent, router, ri = self._pd_setup_agent_router()
+
+ # Add the gateway interface
+ self._pd_add_gw_interface(agent, router, ri)
+
+ # Create 1 pd-enabled subnet and add router interface
+ intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=1)
+ ri.process(agent)
+
+ # Get prefixes
+ self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
+
+ # Update the router with the new prefix
+ ri.process(agent)
+
+ # Check that radvd is started
+ self._pd_assert_radvd_calls(ri)
+
+ # Now add another interface
+ # Create one pd-enabled subnet and add router interface
+ intfs1 = l3_test_common.router_append_pd_enabled_subnet(router,
+ count=1)
+ ri.process(agent)
+
+ # Get prefixes
+ self._pd_get_prefixes(agent, router, ri, intfs,
+ intfs1, mock_get_prefix)
+
+ # Update the router with the new prefix
+ ri.process(agent)
+
+ # Check that radvd is notified for the new prefix
+ self._pd_assert_radvd_calls(ri)
+
+ # Now remove the gw interface
+ self._pd_remove_gw_interface(intfs + intfs1, agent, router, ri)
+
+ ri.process(agent)
namespace=ns)
self.assertFalse(self.ip_dev().addr.add.called)
+ def test_add_ipv6_addr(self):
+ device_name = 'tap0'
+ cidr = '2001:db8::/64'
+ ns = '12345678-1234-5678-90ab-ba0987654321'
+ bc = BaseChild(self.conf)
+
+ bc.add_ipv6_addr(device_name, cidr, ns)
+
+ self.ip_dev.assert_has_calls(
+ [mock.call(device_name, namespace=ns),
+ mock.call().addr.add(cidr, 'global')])
+
+ def test_delete_ipv6_addr(self):
+ device_name = 'tap0'
+ cidr = '2001:db8::/64'
+ ns = '12345678-1234-5678-90ab-ba0987654321'
+ bc = BaseChild(self.conf)
+
+ bc.delete_ipv6_addr(device_name, cidr, ns)
+
+ self.ip_dev.assert_has_calls(
+ [mock.call(device_name, namespace=ns),
+ mock.call().delete_addr_and_conntrack_state(cidr)])
+
+ def test_delete_ipv6_addr_with_prefix(self):
+ device_name = 'tap0'
+ prefix = '2001:db8::/48'
+ in_cidr = '2001:db8::/64'
+ out_cidr = '2001:db7::/64'
+ ns = '12345678-1234-5678-90ab-ba0987654321'
+ in_addresses = [dict(scope='global',
+ dynamic=False,
+ cidr=in_cidr)]
+ out_addresses = [dict(scope='global',
+ dynamic=False,
+ cidr=out_cidr)]
+ # Initially set the address list to be empty
+ self.ip_dev().addr.list = mock.Mock(return_value=[])
+
+ bc = BaseChild(self.conf)
+
+ # Call delete_v6addr_with_prefix when the address list is empty
+ bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
+ # Assert that delete isn't called
+ self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
+
+ # Set the address list to contain only an address outside of the range
+ # of the given prefix
+ self.ip_dev().addr.list = mock.Mock(return_value=out_addresses)
+ bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
+ # Assert that delete isn't called
+ self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
+
+ # Set the address list to contain only an address inside of the range
+ # of the given prefix
+ self.ip_dev().addr.list = mock.Mock(return_value=in_addresses)
+ bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
+ # Assert that delete is called
+ self.ip_dev.assert_has_calls(
+ [mock.call(device_name, namespace=ns),
+ mock.call().addr.list(scope='global', filters=['permanent']),
+ mock.call().delete_addr_and_conntrack_state(in_cidr)])
+
+ def test_get_ipv6_llas(self):
+ ns = '12345678-1234-5678-90ab-ba0987654321'
+ addresses = [dict(scope='link',
+ dynamic=False,
+ cidr='fe80:cafe::/64')]
+ self.ip_dev().addr.list = mock.Mock(return_value=addresses)
+ device_name = self.ip_dev().name
+ bc = BaseChild(self.conf)
+
+ llas = bc.get_ipv6_llas(device_name, ns)
+
+ self.assertEqual(addresses, llas)
+ self.ip_dev.assert_has_calls(
+ [mock.call(device_name, namespace=ns),
+ mock.call().addr.list(scope='link', ip_version=6)])
+
class TestOVSInterfaceDriver(TestBase):
neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main
neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main
neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
+ neutron-pd-notify = neutron.cmd.pd_notify:main
neutron-restproxy-agent = neutron.plugins.bigswitch.agent.restproxy_agent:main
neutron-server = neutron.cmd.eventlet.server:main
neutron-rootwrap = oslo_rootwrap.cmd:main
neutron.qos.agent_drivers =
ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver
sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver
+neutron.agent.linux.pd_drivers =
+ dibbler = neutron.agent.linux.dibbler:PDDibbler
# These are for backwards compat with Icehouse notification_driver configuration values
oslo.messaging.notify.drivers =
neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver