from oslo_serialization import jsonutils
from neutron.common import constants
-from neutron.common import exceptions as exc
from neutron.extensions import portbindings
from neutron.i18n import _LW
from neutron.openstack.common import log
network)
self._binding = binding
self._binding_levels = binding_levels
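+        # Per-level binding state, reset by the plugin via
+        # _prepare_to_bind() before each bind_port() call.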
+ self._segments_to_bind = None
self._new_bound_segment = None
+ self._next_segments_to_bind = None
if original_port:
self._original_binding_levels = self._binding_levels
else:
self._original_binding_levels = None
self._new_port_status = None
+ # The following methods are for use by the ML2 plugin and are not
+ # part of the driver API.
+
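+    # Called by the plugin to reset the per-attempt binding state
+    # before offering segments_to_bind to a driver's bind_port().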
+ def _prepare_to_bind(self, segments_to_bind):
+ self._segments_to_bind = segments_to_bind
+ self._new_bound_segment = None
+ self._next_segments_to_bind = None
+
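+    # The binding levels form a stack: the plugin pushes a
+    # PortBindingLevel as each level is bound and pops it if the
+    # levels below fail to bind.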
+ def _clear_binding_levels(self):
+ self._binding_levels = []
+
+ def _push_binding_level(self, binding_level):
+ self._binding_levels.append(binding_level)
+
+ def _pop_binding_level(self):
+ return self._binding_levels.pop()
+
+ # The following implement the abstract methods and properties of
+ # the driver API.
+
@property
def current(self):
return self._port
@property
def segments_to_bind(self):
- # TODO(rkukura): Implement for hierarchical port binding.
- return self._network_context.network_segments
+ return self._segments_to_bind
def host_agents(self, agent_type):
return self._plugin.get_agents(self._plugin_context,
self._new_port_status = status
def continue_binding(self, segment_id, next_segments_to_bind):
- # TODO(rkukura): Implement for hierarchical port binding.
- msg = _("Hierarchical port binding not yet implemented")
- raise exc.Invalid(message=msg)
+        # TODO(rkukura): Verify binding allowed, segment in network
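+        # Record the segment bound at this level and the segments to
+        # offer to drivers at the next level.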
+ self._new_bound_segment = segment_id
+ self._next_segments_to_bind = next_segments_to_bind
def allocate_dynamic_segment(self, segment):
network_id = self._network_context.current['id']
LOG = log.getLogger(__name__)
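+# Limit the number of binding levels so that a broken or looping set
+# of mechanism drivers cannot continue the binding indefinitely.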
+MAX_BINDING_LEVELS = 10
+
class TypeManager(stevedore.named.NamedExtensionManager):
"""Manage network segment types using drivers."""
binding.
"""
binding = context._binding
- port_id = context._port['id']
LOG.debug("Attempting to bind port %(port)s on host %(host)s "
"for vnic_type %(vnic_type)s with profile %(profile)s",
- {'port': port_id,
- 'host': binding.host,
+ {'port': context.current['id'],
+ 'host': context.host,
'vnic_type': binding.vnic_type,
'profile': binding.profile})
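+        # Discard any binding levels left from a previous attempt.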
+ context._clear_binding_levels()
+ if not self._bind_port_level(context, 0,
+ context.network.network_segments):
+ binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
+ LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
+ {'port': context.current['id'],
+ 'host': context.host})
+
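+    # Attempts to bind at one level, recursing to the next level when
+    # a driver partially binds via continue_binding().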
+ def _bind_port_level(self, context, level, segments_to_bind):
+ binding = context._binding
+ port_id = context._port['id']
+ LOG.debug("Attempting to bind port %(port)s on host %(host)s "
+ "at level %(level)s using segments %(segments)s",
+ {'port': port_id,
+ 'host': context.host,
+ 'level': level,
+ 'segments': segments_to_bind})
+
+ if level == MAX_BINDING_LEVELS:
+ LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
+ "port %(port)s on host %(host)s"),
+ {'port': context.current['id'],
+ 'host': context.host})
+ return False
+
for driver in self.ordered_mech_drivers:
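+            # Skip this driver if it already bound one of these
+            # segments at a higher level, to avoid a binding loop.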
+ if not self._check_driver_to_bind(driver, segments_to_bind,
+ context._binding_levels):
+ continue
try:
+ context._prepare_to_bind(segments_to_bind)
driver.obj.bind_port(context)
segment = context._new_bound_segment
if segment:
- context._binding_levels = [
+ context._push_binding_level(
models.PortBindingLevel(port_id=port_id,
- host=binding.host,
- level=0,
+ host=context.host,
+ level=level,
driver=driver.name,
- segment_id=segment)
- ]
- LOG.debug("Bound port: %(port)s, "
- "host: %(host)s, "
- "vnic_type: %(vnic_type)s, "
- "profile: %(profile)s, "
- "vif_type: %(vif_type)s, "
- "vif_details: %(vif_details)s, "
- "binding_levels: %(binding_levels)s",
- {'port': port_id,
- 'host': context.host,
- 'vnic_type': binding.vnic_type,
- 'profile': binding.profile,
- 'vif_type': binding.vif_type,
- 'vif_details': binding.vif_details,
- 'binding_levels': context.binding_levels})
- return
+ segment_id=segment))
+ next_segments = context._next_segments_to_bind
+ if next_segments:
+ # Continue binding another level.
+ if self._bind_port_level(context, level + 1,
+ next_segments):
+ return True
+ else:
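+                            # The lower levels could not complete the
+                            # binding; undo this level and let the
+                            # next driver try.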
+ context._pop_binding_level()
+ else:
+ # Binding complete.
+ LOG.debug("Bound port: %(port)s, "
+ "host: %(host)s, "
+ "vif_type: %(vif_type)s, "
+ "vif_details: %(vif_details)s, "
+ "binding_levels: %(binding_levels)s",
+ {'port': port_id,
+ 'host': context.host,
+ 'vif_type': binding.vif_type,
+ 'vif_details': binding.vif_details,
+ 'binding_levels': context.binding_levels})
+ return True
except Exception:
LOG.exception(_LE("Mechanism driver %s failed in "
"bind_port"),
                              driver.name)
+ def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
+ # To prevent a possible binding loop, don't try to bind with
+ # this driver if the same driver has already bound at a higher
+ # level to one of the segments we are currently trying to
+        # bind. Note that it is OK for the same driver to bind at
+ # multiple levels using different segments.
+ for level in binding_levels:
+            if (level.driver == driver.name and
+                level.segment_id in [s[api.ID] for s in segments_to_bind]):
+ return False
+ return True
+
class ExtensionManager(stevedore.named.NamedExtensionManager):
"""Manage extension drivers using drivers."""
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
- mech_context._binding_levels = None
+ mech_context._clear_binding_levels()
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
- mech_context._binding_levels = None
+ mech_context._clear_binding_levels()
binding.host = ''
self._update_port_dict_binding(port, binding)
"(original settings %(original)s) "
"binding levels %(levels)s "
"(original binding levels %(original_levels)s) "
- "on network %(network)s"),
+ "on network %(network)s "
+ "with segments to bind %(segments_to_bind)s"),
{'method': method_name,
'current': context.current,
'original': context.original,
'levels': context.binding_levels,
'original_levels': context.original_binding_levels,
- 'network': network_context.current})
+ 'network': network_context.current,
+ 'segments_to_bind': context.segments_to_bind})
def create_port_precommit(self, context):
self._log_port_call("create_port_precommit", context)
if vif_type in (portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED):
- self._check_unbound(context.binding_levels,
- context.top_bound_segment,
- context.bottom_bound_segment)
+ if (context.segments_to_bind and
+ context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
+ # Partially bound.
+ self._check_bound(context.binding_levels,
+ context.top_bound_segment,
+ context.bottom_bound_segment)
+ else:
+ self._check_unbound(context.binding_levels,
+ context.top_bound_segment,
+ context.bottom_bound_segment)
assert(context.current['id'] not in self.bound_ports)
else:
self._check_bound(context.binding_levels,
self._check_port_context(context, False)
host = context.host
- segment = context.segments_to_bind[0][api.ID]
+ segment = context.segments_to_bind[0]
+ segment_id = segment[api.ID]
if host == "host-ovs-no_filter":
- context.set_binding(segment, portbindings.VIF_TYPE_OVS,
+ context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: False})
self.bound_ports.add(context.current['id'])
elif host == "host-bridge-filter":
- context.set_binding(segment, portbindings.VIF_TYPE_BRIDGE,
+ context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE,
{portbindings.CAP_PORT_FILTER: True})
self.bound_ports.add(context.current['id'])
elif host == "host-ovs-filter-active":
- context.set_binding(segment, portbindings.VIF_TYPE_OVS,
+ context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: True},
status=const.PORT_STATUS_ACTIVE)
self.bound_ports.add(context.current['id'])
+ elif host == "host-hierarchical":
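+            # Bind hierarchically: bind the static local segment to a
+            # dynamically allocated vlan segment, then complete the
+            # binding when offered that vlan segment at the next level.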
+ segment_type = segment[api.NETWORK_TYPE]
+ if segment_type == 'local':
+ next_segment = context.allocate_dynamic_segment(
+ {api.NETWORK_TYPE: 'vlan',
+ api.PHYSICAL_NETWORK: 'physnet1'}
+ )
+ context.continue_binding(segment_id, [next_segment])
+ elif segment_type == 'vlan':
+ context.set_binding(segment_id,
+ portbindings.VIF_TYPE_OVS,
+ {portbindings.CAP_PORT_FILTER: False})
+ self.bound_ports.add(context.current['id'])
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'test'],
'ml2')
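+        # The hierarchical binding test dynamically allocates a vlan
+        # segment, so a vlan range must be configured.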
+ config.cfg.CONF.set_override('network_vlan_ranges',
+ ['physnet1:1000:1099'],
+ group='ml2_type_vlan')
super(PortBindingTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
self.plugin = manager.NeutronManager.get_plugin()
self.assertEqual(port_status, 'DOWN')
def _test_port_binding(self, host, vif_type, has_port_filter, bound,
- status=None):
+ status=None, network_type='local'):
mac_address = 'aa:aa:aa:aa:aa:aa'
host_arg = {portbindings.HOST_ID: host,
'mac_address': mac_address}
details = self.plugin.endpoints[0].get_device_details(
neutron_context, agent_id="theAgentId", device=port_id)
if bound:
- self.assertEqual(details['network_type'], 'local')
+ self.assertEqual(details['network_type'], network_type)
self.assertEqual(mac_address, details['mac_address'])
else:
self.assertNotIn('network_type', details)
self.assertIsNone(
self.plugin.get_bound_port_context(ctx, port['port']['id']))
+ def test_hierarchical_binding(self):
+ self._test_port_binding("host-hierarchical",
+ portbindings.VIF_TYPE_OVS,
+ False, True, network_type='vlan')
+
def _test_update_port_binding(self, host, new_host=None):
with mock.patch.object(self.plugin,
'_notify_port_updated') as notify_mock: