From d675fddb579c26991cdab58d2b8a034ad161f494 Mon Sep 17 00:00:00 2001
From: Robert Kukura
Date: Fri, 22 Aug 2014 05:01:03 -0400
Subject: [PATCH] ML2: Hierarchical port binding

The ML2 port binding logic is modified to support hierarchical
bindings. If a mechanism driver's bind_port() calls continue_binding()
rather than set_binding(), the supplied dynamic segments are used for
another level of binding.

Gerrit Spec: https://review.openstack.org/#/c/139886/

Implements: blueprint ml2-hierarchical-port-binding
Change-Id: Id741c2d6c443fa0eac5ecf8d964b3fc14b5d3abe
---
 neutron/plugins/ml2/driver_context.py        | 32 +++++--
 neutron/plugins/ml2/managers.py              | 93 ++++++++++++++-----
 neutron/plugins/ml2/plugin.py                |  4 +-
 .../unit/ml2/drivers/mechanism_logger.py     |  6 +-
 .../tests/unit/ml2/drivers/mechanism_test.py | 35 +++++--
 neutron/tests/unit/ml2/test_port_binding.py  | 12 ++-
 6 files changed, 140 insertions(+), 42 deletions(-)

diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py
index 3bd3d58fe..88ff031db 100644
--- a/neutron/plugins/ml2/driver_context.py
+++ b/neutron/plugins/ml2/driver_context.py
@@ -16,7 +16,6 @@
 from oslo_serialization import jsonutils
 
 from neutron.common import constants
-from neutron.common import exceptions as exc
 from neutron.extensions import portbindings
 from neutron.i18n import _LW
 from neutron.openstack.common import log
@@ -86,13 +85,35 @@ class PortContext(MechanismDriverContext, api.PortContext):
                                                network)
         self._binding = binding
         self._binding_levels = binding_levels
+        self._segments_to_bind = None
         self._new_bound_segment = None
+        self._next_segments_to_bind = None
         if original_port:
             self._original_binding_levels = self._binding_levels
         else:
             self._original_binding_levels = None
         self._new_port_status = None
 
+    # The following methods are for use by the ML2 plugin and are not
+    # part of the driver API.
+
+    def _prepare_to_bind(self, segments_to_bind):
+        self._segments_to_bind = segments_to_bind
+        self._new_bound_segment = None
+        self._next_segments_to_bind = None
+
+    def _clear_binding_levels(self):
+        self._binding_levels = []
+
+    def _push_binding_level(self, binding_level):
+        self._binding_levels.append(binding_level)
+
+    def _pop_binding_level(self):
+        return self._binding_levels.pop()
+
+    # The following implement the abstract methods and properties of
+    # the driver API.
+
     @property
     def current(self):
         return self._port
@@ -178,8 +199,7 @@ class PortContext(MechanismDriverContext, api.PortContext):
 
     @property
     def segments_to_bind(self):
-        # TODO(rkukura): Implement for hierarchical port binding.
-        return self._network_context.network_segments
+        return self._segments_to_bind
 
     def host_agents(self, agent_type):
         return self._plugin.get_agents(self._plugin_context,
@@ -195,9 +215,9 @@ class PortContext(MechanismDriverContext, api.PortContext):
         self._new_port_status = status
 
     def continue_binding(self, segment_id, next_segments_to_bind):
-        # TODO(rkukura): Implement for hierarchical port binding.
-        msg = _("Hierarchical port binding not yet implemented")
-        raise exc.Invalid(message=msg)
+        # TODO(rkukura) Verify binding allowed, segment in network
+        self._new_bound_segment = segment_id
+        self._next_segments_to_bind = next_segments_to_bind
 
     def allocate_dynamic_segment(self, segment):
         network_id = self._network_context.current['id']
diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py
index 20f8070e7..94c00a8d1 100644
--- a/neutron/plugins/ml2/managers.py
+++ b/neutron/plugins/ml2/managers.py
@@ -30,6 +30,8 @@ from neutron.plugins.ml2 import models
 
 LOG = log.getLogger(__name__)
 
+MAX_BINDING_LEVELS = 10
+
 
 class TypeManager(stevedore.named.NamedExtensionManager):
     """Manage network segment types using drivers."""
@@ -557,40 +559,73 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
         binding.
        """
         binding = context._binding
-        port_id = context._port['id']
         LOG.debug("Attempting to bind port %(port)s on host %(host)s "
                   "for vnic_type %(vnic_type)s with profile %(profile)s",
-                  {'port': port_id,
-                   'host': binding.host,
+                  {'port': context.current['id'],
+                   'host': context.host,
                    'vnic_type': binding.vnic_type,
                    'profile': binding.profile})
+        context._clear_binding_levels()
+        if not self._bind_port_level(context, 0,
+                                     context.network.network_segments):
+            binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
+            LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
+                      {'port': context.current['id'],
+                       'host': context.host})
+
+    def _bind_port_level(self, context, level, segments_to_bind):
+        binding = context._binding
+        port_id = context._port['id']
+        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
+                  "at level %(level)s using segments %(segments)s",
+                  {'port': port_id,
+                   'host': context.host,
+                   'level': level,
+                   'segments': segments_to_bind})
+
+        if level == MAX_BINDING_LEVELS:
+            LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
+                          "port %(port)s on host %(host)s"),
+                      {'port': context.current['id'],
+                       'host': context.host})
+            return False
+
         for driver in self.ordered_mech_drivers:
+            if not self._check_driver_to_bind(driver, segments_to_bind,
+                                              context._binding_levels):
+                continue
             try:
+                context._prepare_to_bind(segments_to_bind)
                 driver.obj.bind_port(context)
                 segment = context._new_bound_segment
                 if segment:
-                    context._binding_levels = [
+                    context._push_binding_level(
                         models.PortBindingLevel(port_id=port_id,
-                                                host=binding.host,
-                                                level=0,
+                                                host=context.host,
+                                                level=level,
                                                 driver=driver.name,
-                                                segment_id=segment)
-                    ]
-                    LOG.debug("Bound port: %(port)s, "
-                              "host: %(host)s, "
-                              "vnic_type: %(vnic_type)s, "
-                              "profile: %(profile)s, "
-                              "vif_type: %(vif_type)s, "
-                              "vif_details: %(vif_details)s, "
-                              "binding_levels: %(binding_levels)s",
-                              {'port': port_id,
-                               'host': context.host,
-                               'vnic_type': binding.vnic_type,
-                               'profile': binding.profile,
-                               'vif_type': binding.vif_type,
-                               'vif_details': binding.vif_details,
-                               'binding_levels': context.binding_levels})
-                    return
+                                                segment_id=segment))
+                    next_segments = context._next_segments_to_bind
+                    if next_segments:
+                        # Continue binding another level.
+                        if self._bind_port_level(context, level + 1,
+                                                 next_segments):
+                            return True
+                        else:
+                            context._pop_binding_level()
+                    else:
+                        # Binding complete.
+                        LOG.debug("Bound port: %(port)s, "
+                                  "host: %(host)s, "
+                                  "vif_type: %(vif_type)s, "
+                                  "vif_details: %(vif_details)s, "
+                                  "binding_levels: %(binding_levels)s",
+                                  {'port': port_id,
+                                   'host': context.host,
+                                   'vif_type': binding.vif_type,
+                                   'vif_details': binding.vif_details,
+                                   'binding_levels': context.binding_levels})
+                        return True
             except Exception:
                 LOG.exception(_LE("Mechanism driver %s failed in "
                                   "bind_port"),
                               driver.name)
@@ -600,6 +635,18 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
                     {'port': context._port['id'],
                      'host': binding.host})
 
+    def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
+        # To prevent a possible binding loop, don't try to bind with
+        # this driver if the same driver has already bound at a higher
+        # level to one of the segments we are currently trying to
+        # bind. Note that it is OK for the same driver to bind at
+        # multiple levels using different segments.
+        for level in binding_levels:
+            if (level.driver == driver and
+                level.segment_id in segments_to_bind):
+                return False
+        return True
+
 
 class ExtensionManager(stevedore.named.NamedExtensionManager):
     """Manage extension drivers using drivers."""
diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py
index bcc67c50c..85b87c76a 100644
--- a/neutron/plugins/ml2/plugin.py
+++ b/neutron/plugins/ml2/plugin.py
@@ -231,13 +231,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             binding.vif_type = portbindings.VIF_TYPE_UNBOUND
             binding.vif_details = ''
             db.clear_binding_levels(session, port_id, original_host)
-            mech_context._binding_levels = None
+            mech_context._clear_binding_levels()
 
         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
             binding.vif_details = ''
             db.clear_binding_levels(session, port_id, original_host)
-            mech_context._binding_levels = None
+            mech_context._clear_binding_levels()
             binding.host = ''
 
         self._update_port_dict_binding(port, binding)
diff --git a/neutron/tests/unit/ml2/drivers/mechanism_logger.py b/neutron/tests/unit/ml2/drivers/mechanism_logger.py
index 219962080..da4edbb98 100644
--- a/neutron/tests/unit/ml2/drivers/mechanism_logger.py
+++ b/neutron/tests/unit/ml2/drivers/mechanism_logger.py
@@ -86,13 +86,15 @@ class LoggerMechanismDriver(api.MechanismDriver):
                    "(original settings %(original)s) "
                    "binding levels %(levels)s "
                    "(original binding levels %(original_levels)s) "
-                   "on network %(network)s"),
+                   "on network %(network)s "
+                   "with segments to bind %(segments_to_bind)s"),
                  {'method': method_name,
                   'current': context.current,
                   'original': context.original,
                   'levels': context.binding_levels,
                   'original_levels': context.original_binding_levels,
-                  'network': network_context.current})
+                  'network': network_context.current,
+                  'segments_to_bind': context.segments_to_bind})
 
     def create_port_precommit(self, context):
         self._log_port_call("create_port_precommit", context)
diff --git a/neutron/tests/unit/ml2/drivers/mechanism_test.py b/neutron/tests/unit/ml2/drivers/mechanism_test.py
index 324a873bf..2c2694917 100644
--- a/neutron/tests/unit/ml2/drivers/mechanism_test.py
+++ b/neutron/tests/unit/ml2/drivers/mechanism_test.py
@@ -91,9 +91,16 @@ class TestMechanismDriver(api.MechanismDriver):
 
         if vif_type in (portbindings.VIF_TYPE_UNBOUND,
                         portbindings.VIF_TYPE_BINDING_FAILED):
-            self._check_unbound(context.binding_levels,
-                                context.top_bound_segment,
-                                context.bottom_bound_segment)
+            if (context.segments_to_bind and
+                context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
+                # Partially bound.
+                self._check_bound(context.binding_levels,
+                                  context.top_bound_segment,
+                                  context.bottom_bound_segment)
+            else:
+                self._check_unbound(context.binding_levels,
+                                    context.top_bound_segment,
+                                    context.bottom_bound_segment)
             assert(context.current['id'] not in self.bound_ports)
         else:
             self._check_bound(context.binding_levels,
@@ -168,17 +175,31 @@ class TestMechanismDriver(api.MechanismDriver):
         self._check_port_context(context, False)
 
         host = context.host
-        segment = context.segments_to_bind[0][api.ID]
+        segment = context.segments_to_bind[0]
+        segment_id = segment[api.ID]
         if host == "host-ovs-no_filter":
-            context.set_binding(segment, portbindings.VIF_TYPE_OVS,
+            context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
                                 {portbindings.CAP_PORT_FILTER: False})
             self.bound_ports.add(context.current['id'])
         elif host == "host-bridge-filter":
-            context.set_binding(segment, portbindings.VIF_TYPE_BRIDGE,
+            context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE,
                                 {portbindings.CAP_PORT_FILTER: True})
             self.bound_ports.add(context.current['id'])
         elif host == "host-ovs-filter-active":
-            context.set_binding(segment, portbindings.VIF_TYPE_OVS,
+            context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
                                 {portbindings.CAP_PORT_FILTER: True},
                                 status=const.PORT_STATUS_ACTIVE)
             self.bound_ports.add(context.current['id'])
+        elif host == "host-hierarchical":
+            segment_type = segment[api.NETWORK_TYPE]
+            if segment_type == 'local':
+                next_segment = context.allocate_dynamic_segment(
+                    {api.NETWORK_TYPE: 'vlan',
+                     api.PHYSICAL_NETWORK: 'physnet1'}
+                )
+                context.continue_binding(segment_id, [next_segment])
+            elif segment_type == 'vlan':
+                context.set_binding(segment_id,
+                                    portbindings.VIF_TYPE_OVS,
+                                    {portbindings.CAP_PORT_FILTER: False})
+                self.bound_ports.add(context.current['id'])
diff --git a/neutron/tests/unit/ml2/test_port_binding.py b/neutron/tests/unit/ml2/test_port_binding.py
index 4028b7ed2..ae88ab67c 100644
--- a/neutron/tests/unit/ml2/test_port_binding.py
+++ b/neutron/tests/unit/ml2/test_port_binding.py
@@ -37,6 +37,9 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
         config.cfg.CONF.set_override('mechanism_drivers',
                                      ['logger', 'test'],
                                      'ml2')
+        config.cfg.CONF.set_override('network_vlan_ranges',
+                                     ['physnet1:1000:1099'],
+                                     group='ml2_type_vlan')
         super(PortBindingTestCase, self).setUp(PLUGIN_NAME)
         self.port_create_status = 'DOWN'
         self.plugin = manager.NeutronManager.get_plugin()
@@ -55,7 +58,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
         self.assertEqual(port_status, 'DOWN')
 
     def _test_port_binding(self, host, vif_type, has_port_filter, bound,
-                           status=None):
+                           status=None, network_type='local'):
         mac_address = 'aa:aa:aa:aa:aa:aa'
         host_arg = {portbindings.HOST_ID: host,
                     'mac_address': mac_address}
@@ -68,7 +71,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
         details = self.plugin.endpoints[0].get_device_details(
             neutron_context, agent_id="theAgentId", device=port_id)
         if bound:
-            self.assertEqual(details['network_type'], 'local')
+            self.assertEqual(details['network_type'], network_type)
             self.assertEqual(mac_address, details['mac_address'])
         else:
             self.assertNotIn('network_type', details)
@@ -108,6 +111,11 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
         self.assertIsNone(
             self.plugin.get_bound_port_context(ctx, port['port']['id']))
 
+    def test_hierarchical_binding(self):
+        self._test_port_binding("host-hierarchical",
+                                portbindings.VIF_TYPE_OVS,
+                                False, True, network_type='vlan')
+
     def _test_update_port_binding(self, host,
                                   new_host=None):
         with mock.patch.object(self.plugin,
                                '_notify_port_updated') as notify_mock:
-- 
2.45.2
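
A minimal usage sketch, not part of the patch: the driver below shows
how an out-of-tree mechanism driver might call the continue_binding()
API added above, mirroring the "host-hierarchical" case in
TestMechanismDriver. The class name, the vxlan-to-vlan translation,
and the 'physnet1' physical network are illustrative assumptions only.

    # Hypothetical example, not part of this patch. The top level
    # translates a static segment into a dynamic VLAN segment and asks
    # ML2 to continue binding with it; the bottom level completes the
    # binding with set_binding().
    from neutron.extensions import portbindings
    from neutron.plugins.ml2 import driver_api as api


    class ExampleHierarchicalDriver(api.MechanismDriver):

        def initialize(self):
            pass

        def bind_port(self, context):
            for segment in context.segments_to_bind:
                if segment[api.NETWORK_TYPE] == 'vxlan':
                    # Top level: allocate (or reuse) a dynamic VLAN
                    # segment on an assumed physical network and offer
                    # it to the drivers at the next level.
                    next_segment = context.allocate_dynamic_segment(
                        {api.NETWORK_TYPE: 'vlan',
                         api.PHYSICAL_NETWORK: 'physnet1'})
                    context.continue_binding(segment[api.ID],
                                             [next_segment])
                    return
                elif segment[api.NETWORK_TYPE] == 'vlan':
                    # Bottom level: set_binding() completes the binding.
                    context.set_binding(segment[api.ID],
                                        portbindings.VIF_TYPE_OVS,
                                        {portbindings.CAP_PORT_FILTER: False})
                    return

MAX_BINDING_LEVELS caps the recursion depth, and _check_driver_to_bind()
prevents a driver from binding again to a segment it already bound at a
higher level, so a driver written this way cannot loop indefinitely.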