review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
ML2: Hierarchical port binding
author: Robert Kukura <kukura@noironetworks.com>
Fri, 22 Aug 2014 09:01:03 +0000 (05:01 -0400)
committer: Henry Gessau <gessau@cisco.com>
Sat, 14 Feb 2015 04:36:29 +0000 (04:36 +0000)
The ML2 port binding logic is modified to support hierarchical
bindings. If a mechanism driver's bind_port() calls continue_binding()
rather than set_binding(), the supplied dynamic segments are used for
another level of binding.

Gerrit Spec: https://review.openstack.org/#/c/139886/

Implements: blueprint ml2-hierarchical-port-binding

Change-Id: Id741c2d6c443fa0eac5ecf8d964b3fc14b5d3abe

neutron/plugins/ml2/driver_context.py
neutron/plugins/ml2/managers.py
neutron/plugins/ml2/plugin.py
neutron/tests/unit/ml2/drivers/mechanism_logger.py
neutron/tests/unit/ml2/drivers/mechanism_test.py
neutron/tests/unit/ml2/test_port_binding.py

index 3bd3d58fe3d0d8b5e96e550f94767ca04ed7e829..88ff031db296b4e0d4525cf9674250e775707c71 100644 (file)
@@ -16,7 +16,6 @@
 from oslo_serialization import jsonutils
 
 from neutron.common import constants
-from neutron.common import exceptions as exc
 from neutron.extensions import portbindings
 from neutron.i18n import _LW
 from neutron.openstack.common import log
@@ -86,13 +85,35 @@ class PortContext(MechanismDriverContext, api.PortContext):
                                                network)
         self._binding = binding
         self._binding_levels = binding_levels
+        self._segments_to_bind = None
         self._new_bound_segment = None
+        self._next_segments_to_bind = None
         if original_port:
             self._original_binding_levels = self._binding_levels
         else:
             self._original_binding_levels = None
         self._new_port_status = None
 
+    # The following methods are for use by the ML2 plugin and are not
+    # part of the driver API.
+
+    def _prepare_to_bind(self, segments_to_bind):
+        self._segments_to_bind = segments_to_bind
+        self._new_bound_segment = None
+        self._next_segments_to_bind = None
+
+    def _clear_binding_levels(self):
+        self._binding_levels = []
+
+    def _push_binding_level(self, binding_level):
+        self._binding_levels.append(binding_level)
+
+    def _pop_binding_level(self):
+        return self._binding_levels.pop()
+
+    # The following implement the abstract methods and properties of
+    # the driver API.
+
     @property
     def current(self):
         return self._port
@@ -178,8 +199,7 @@ class PortContext(MechanismDriverContext, api.PortContext):
 
     @property
     def segments_to_bind(self):
-        # TODO(rkukura): Implement for hierarchical port binding.
-        return self._network_context.network_segments
+        return self._segments_to_bind
 
     def host_agents(self, agent_type):
         return self._plugin.get_agents(self._plugin_context,
@@ -195,9 +215,9 @@ class PortContext(MechanismDriverContext, api.PortContext):
         self._new_port_status = status
 
     def continue_binding(self, segment_id, next_segments_to_bind):
-        # TODO(rkukura): Implement for hierarchical port binding.
-        msg = _("Hierarchical port binding not yet implemented")
-        raise exc.Invalid(message=msg)
+        # TODO(rkukura) Verify binding allowed, segment in network
+        self._new_bound_segment = segment_id
+        self._next_segments_to_bind = next_segments_to_bind
 
     def allocate_dynamic_segment(self, segment):
         network_id = self._network_context.current['id']
index 20f8070e713416c6daa19ca1b5e0e319c34ddc98..94c00a8d12b83d63ce76d06dc3ab0b7355bf9746 100644 (file)
@@ -30,6 +30,8 @@ from neutron.plugins.ml2 import models
 
 LOG = log.getLogger(__name__)
 
+MAX_BINDING_LEVELS = 10
+
 
 class TypeManager(stevedore.named.NamedExtensionManager):
     """Manage network segment types using drivers."""
@@ -557,40 +559,73 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
         binding.
         """
         binding = context._binding
-        port_id = context._port['id']
         LOG.debug("Attempting to bind port %(port)s on host %(host)s "
                   "for vnic_type %(vnic_type)s with profile %(profile)s",
-                  {'port': port_id,
-                   'host': binding.host,
+                  {'port': context.current['id'],
+                   'host': context.host,
                    'vnic_type': binding.vnic_type,
                    'profile': binding.profile})
+        context._clear_binding_levels()
+        if not self._bind_port_level(context, 0,
+                                     context.network.network_segments):
+            binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
+            LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
+                      {'port': context.current['id'],
+                       'host': context.host})
+
+    def _bind_port_level(self, context, level, segments_to_bind):
+        binding = context._binding
+        port_id = context._port['id']
+        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
+                  "at level %(level)s using segments %(segments)s",
+                  {'port': port_id,
+                   'host': context.host,
+                   'level': level,
+                   'segments': segments_to_bind})
+
+        if level == MAX_BINDING_LEVELS:
+            LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
+                        "port %(port)s on host %(host)s"),
+                      {'port': context.current['id'],
+                       'host': context.host})
+            return False
+
         for driver in self.ordered_mech_drivers:
+            if not self._check_driver_to_bind(driver, segments_to_bind,
+                                              context._binding_levels):
+                continue
             try:
+                context._prepare_to_bind(segments_to_bind)
                 driver.obj.bind_port(context)
                 segment = context._new_bound_segment
                 if segment:
-                    context._binding_levels = [
+                    context._push_binding_level(
                         models.PortBindingLevel(port_id=port_id,
-                                                host=binding.host,
-                                                level=0,
+                                                host=context.host,
+                                                level=level,
                                                 driver=driver.name,
-                                                segment_id=segment)
-                    ]
-                    LOG.debug("Bound port: %(port)s, "
-                              "host: %(host)s, "
-                              "vnic_type: %(vnic_type)s, "
-                              "profile: %(profile)s, "
-                              "vif_type: %(vif_type)s, "
-                              "vif_details: %(vif_details)s, "
-                              "binding_levels: %(binding_levels)s",
-                              {'port': port_id,
-                               'host': context.host,
-                               'vnic_type': binding.vnic_type,
-                               'profile': binding.profile,
-                               'vif_type': binding.vif_type,
-                               'vif_details': binding.vif_details,
-                               'binding_levels': context.binding_levels})
-                    return
+                                                segment_id=segment))
+                    next_segments = context._next_segments_to_bind
+                    if next_segments:
+                        # Continue binding another level.
+                        if self._bind_port_level(context, level + 1,
+                                                 next_segments):
+                            return True
+                        else:
+                            context._pop_binding_level()
+                    else:
+                        # Binding complete.
+                        LOG.debug("Bound port: %(port)s, "
+                                  "host: %(host)s, "
+                                  "vif_type: %(vif_type)s, "
+                                  "vif_details: %(vif_details)s, "
+                                  "binding_levels: %(binding_levels)s",
+                                  {'port': port_id,
+                                   'host': context.host,
+                                   'vif_type': binding.vif_type,
+                                   'vif_details': binding.vif_details,
+                                   'binding_levels': context.binding_levels})
+                        return True
             except Exception:
                 LOG.exception(_LE("Mechanism driver %s failed in "
                                   "bind_port"),
@@ -600,6 +635,18 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
                   {'port': context._port['id'],
                    'host': binding.host})
 
+    def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
+        # To prevent a possible binding loop, don't try to bind with
+        # this driver if the same driver has already bound at a higher
+        # level to one of the segments we are currently trying to
+        # bind. Note that it is OK for the same driver to bind at
+        # multiple levels using different segments.
+        for level in binding_levels:
+            if (level.driver == driver and
+                level.segment_id in segments_to_bind):
+                return False
+        return True
+
 
 class ExtensionManager(stevedore.named.NamedExtensionManager):
     """Manage extension drivers using drivers."""
index bcc67c50c0cb72a72c06d0f351db14caf886b137..85b87c76af2af059c9c091167f9c9a19ed50965f 100644 (file)
@@ -231,13 +231,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             binding.vif_type = portbindings.VIF_TYPE_UNBOUND
             binding.vif_details = ''
             db.clear_binding_levels(session, port_id, original_host)
-            mech_context._binding_levels = None
+            mech_context._clear_binding_levels()
 
         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
             binding.vif_details = ''
             db.clear_binding_levels(session, port_id, original_host)
-            mech_context._binding_levels = None
+            mech_context._clear_binding_levels()
             binding.host = ''
 
         self._update_port_dict_binding(port, binding)
index 2199620806184eecb57a0bfa45decfa2c98a75fb..da4edbb981f67de1ad579bd1c8b5fc9bf6973dfc 100644 (file)
@@ -86,13 +86,15 @@ class LoggerMechanismDriver(api.MechanismDriver):
                    "(original settings %(original)s) "
                    "binding levels %(levels)s "
                    "(original binding levels %(original_levels)s) "
-                   "on network %(network)s"),
+                   "on network %(network)s "
+                   "with segments to bind %(segments_to_bind)s"),
                  {'method': method_name,
                   'current': context.current,
                   'original': context.original,
                   'levels': context.binding_levels,
                   'original_levels': context.original_binding_levels,
-                  'network': network_context.current})
+                  'network': network_context.current,
+                  'segments_to_bind': context.segments_to_bind})
 
     def create_port_precommit(self, context):
         self._log_port_call("create_port_precommit", context)
index 324a873bf2ccd56a47e62f252fd684ed401eb508..2c269491751c854a8b3265b507eb3582a6cc08a7 100644 (file)
@@ -91,9 +91,16 @@ class TestMechanismDriver(api.MechanismDriver):
 
         if vif_type in (portbindings.VIF_TYPE_UNBOUND,
                         portbindings.VIF_TYPE_BINDING_FAILED):
-            self._check_unbound(context.binding_levels,
-                                context.top_bound_segment,
-                                context.bottom_bound_segment)
+            if (context.segments_to_bind and
+                context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
+                # Partially bound.
+                self._check_bound(context.binding_levels,
+                                  context.top_bound_segment,
+                                  context.bottom_bound_segment)
+            else:
+                self._check_unbound(context.binding_levels,
+                                    context.top_bound_segment,
+                                    context.bottom_bound_segment)
             assert(context.current['id'] not in self.bound_ports)
         else:
             self._check_bound(context.binding_levels,
@@ -168,17 +175,31 @@ class TestMechanismDriver(api.MechanismDriver):
         self._check_port_context(context, False)
 
         host = context.host
-        segment = context.segments_to_bind[0][api.ID]
+        segment = context.segments_to_bind[0]
+        segment_id = segment[api.ID]
         if host == "host-ovs-no_filter":
-            context.set_binding(segment, portbindings.VIF_TYPE_OVS,
+            context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
                                 {portbindings.CAP_PORT_FILTER: False})
             self.bound_ports.add(context.current['id'])
         elif host == "host-bridge-filter":
-            context.set_binding(segment, portbindings.VIF_TYPE_BRIDGE,
+            context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE,
                                 {portbindings.CAP_PORT_FILTER: True})
             self.bound_ports.add(context.current['id'])
         elif host == "host-ovs-filter-active":
-            context.set_binding(segment, portbindings.VIF_TYPE_OVS,
+            context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
                                 {portbindings.CAP_PORT_FILTER: True},
                                 status=const.PORT_STATUS_ACTIVE)
             self.bound_ports.add(context.current['id'])
+        elif host == "host-hierarchical":
+            segment_type = segment[api.NETWORK_TYPE]
+            if segment_type == 'local':
+                next_segment = context.allocate_dynamic_segment(
+                    {api.NETWORK_TYPE: 'vlan',
+                     api.PHYSICAL_NETWORK: 'physnet1'}
+                )
+                context.continue_binding(segment_id, [next_segment])
+            elif segment_type == 'vlan':
+                context.set_binding(segment_id,
+                                    portbindings.VIF_TYPE_OVS,
+                                    {portbindings.CAP_PORT_FILTER: False})
+                self.bound_ports.add(context.current['id'])
index 4028b7ed25c1b5db537812e69f87e983c252582d..ae88ab67c2ffbfa623b71549992fdabadbcd1afa 100644 (file)
@@ -37,6 +37,9 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
         config.cfg.CONF.set_override('mechanism_drivers',
                                      ['logger', 'test'],
                                      'ml2')
+        config.cfg.CONF.set_override('network_vlan_ranges',
+                                     ['physnet1:1000:1099'],
+                                     group='ml2_type_vlan')
         super(PortBindingTestCase, self).setUp(PLUGIN_NAME)
         self.port_create_status = 'DOWN'
         self.plugin = manager.NeutronManager.get_plugin()
@@ -55,7 +58,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
             self.assertEqual(port_status, 'DOWN')
 
     def _test_port_binding(self, host, vif_type, has_port_filter, bound,
-                           status=None):
+                           status=None, network_type='local'):
         mac_address = 'aa:aa:aa:aa:aa:aa'
         host_arg = {portbindings.HOST_ID: host,
                     'mac_address': mac_address}
@@ -68,7 +71,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
             details = self.plugin.endpoints[0].get_device_details(
                 neutron_context, agent_id="theAgentId", device=port_id)
             if bound:
-                self.assertEqual(details['network_type'], 'local')
+                self.assertEqual(details['network_type'], network_type)
                 self.assertEqual(mac_address, details['mac_address'])
             else:
                 self.assertNotIn('network_type', details)
@@ -108,6 +111,11 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
             self.assertIsNone(
                 self.plugin.get_bound_port_context(ctx, port['port']['id']))
 
+    def test_hierarchical_binding(self):
+        self._test_port_binding("host-hierarchical",
+                                portbindings.VIF_TYPE_OVS,
+                                False, True, network_type='vlan')
+
     def _test_update_port_binding(self, host, new_host=None):
         with mock.patch.object(self.plugin,
                                '_notify_port_updated') as notify_mock: