@property
def status(self):
+ # REVISIT(rkukura): Eliminate special DVR case as part of
+ # resolving bug 1367391?
+ if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
+ return self._binding.status
+
return self._port['status']
@property
def host(self):
+ # REVISIT(rkukura): Eliminate special DVR case as part of
+ # resolving bug 1367391?
+ if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
+ return self._binding.host
+
return self._port.get(portbindings.HOST_ID)
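# Release of a previously allocated dynamic segment is delegated to
# the type manager.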
def release_dynamic_segment(self, segment_id):
return self._plugin.type_manager.release_dynamic_segment(
self._plugin_context.session, segment_id)
-
-
-class DvrPortContext(PortContext):
-
- def __init__(self, plugin, plugin_context, port, network, binding,
- original_port=None):
- super(DvrPortContext, self).__init__(
- plugin, plugin_context, port, network, binding,
- original_port=original_port)
-
- @property
- def host(self):
- if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
- return self._binding.host
-
- return super(DvrPortContext, self).host
-
- @property
- def status(self):
- if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
- return self._binding.status
-
- return super(DvrPortContext, self).status
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = self.get_network(plugin_context, port['network_id'])
+ if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
+ # REVISIT(rkukura): The PortBinding instance from the
+ # ml2_port_bindings table, returned as cur_binding
+ # from db.get_locked_port_and_binding() above, is
+ # currently not used for DVR distributed ports, and is
+ # replaced here with the DVRPortBinding instance from
+ # the ml2_dvr_port_bindings table specific to the host
+ # on which the distributed port is being bound. It
+ # would be possible to optimize this code to avoid
+ # fetching the PortBinding instance in the DVR case,
+ # and even to avoid creating the unused entry in the
+ # ml2_port_bindings table. But the upcoming resolution
+ # for bug 1367391 will eliminate the
+ # ml2_dvr_port_bindings table, use the
+ # ml2_port_bindings table to store non-host-specific
+ # fields for both distributed and non-distributed
+ # ports, and introduce a new ml2_port_binding_hosts
+ # table for the fields that need to be host-specific
+ # in the distributed case. Since the PortBinding
+ # instance will then be needed, it does not make sense
+ # to optimize this code to avoid fetching it.
+ cur_binding = db.get_dvr_port_binding_by_host(
+ session, port_id, orig_binding.host)
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding,
original_port=oport)
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=device_id)
network = self.get_network(context, orig_port['network_id'])
- mech_context = driver_context.DvrPortContext(self,
+ mech_context = driver_context.PortContext(self,
context, orig_port, network,
binding, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context, attrs)
- self.mechanism_manager.bind_port(mech_context)
- # Now try to commit result of attempting to bind the port.
- self._commit_dvr_port_binding(mech_context._plugin_context,
- orig_port['id'],
- host,
- mech_context)
-
- def _commit_dvr_port_binding(self, plugin_context,
- port_id, host,
- mech_context):
- session = plugin_context.session
- new_binding = mech_context._binding
- with contextlib.nested(lockutils.lock('db-access'),
- session.begin(subtransactions=True)):
- # Get the current port state and build a new PortContext
- # reflecting this state as original state for subsequent
- # mechanism driver update_port_*commit() calls.
- cur_binding = db.get_dvr_port_binding_by_host(session,
- port_id,
- host)
- if not cur_binding:
- LOG.info(_LI("Binding info for port %s was not found, "
- "it might have been deleted already."),
- port_id)
- return
- # Commit our binding results only if port has not been
- # successfully bound concurrently by another thread or
- # process and no binding inputs have been changed.
- commit = ((cur_binding.vif_type in
- [portbindings.VIF_TYPE_UNBOUND,
- portbindings.VIF_TYPE_BINDING_FAILED]) and
- new_binding.host == cur_binding.host and
- new_binding.vnic_type == cur_binding.vnic_type and
- new_binding.profile == cur_binding.profile)
-
- if commit:
- # Update the port's binding state with our binding
- # results.
- cur_binding.vif_type = new_binding.vif_type
- cur_binding.vif_details = new_binding.vif_details
- cur_binding.driver = new_binding.driver
- cur_binding.segment = new_binding.segment
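+ # Attempt the binding and commit the result through the common
+ # _bind_port_if_needed() path.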
+ self._bind_port_if_needed(mech_context)
def delete_port(self, context, id, l3_port_check=True):
LOG.debug("Deleting port %s", id)
if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
- mech_context = driver_context.DvrPortContext(
+ mech_context = driver_context.PortContext(
self, context, port, network, bind)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
LOG.error(_LE("Binding info for DVR port %s not found"),
port_id)
return None
- port_context = driver_context.DvrPortContext(
+ port_context = driver_context.PortContext(
self, plugin_context, port, network, binding)
else:
# since eager loads are disabled in port_db query
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
- mech_context = (driver_context.DvrPortContext(
+ mech_context = (driver_context.PortContext(
self, context, updated_port, network,
binding, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
from neutron.tests import base
-class TestDvrPortContext(base.BaseTestCase):
+class TestPortContext(base.BaseTestCase):
+
+ # REVISIT(rkukura): These were originally DvrPortContext tests,
+ # but DvrPortContext functionality has been folded into the
+ # regular PortContext class. Tests for non-DVR-specific
+ # functionality are needed here as well.
def test_host(self):
plugin = mock.Mock()
binding.host = 'foohost'
with mock.patch.object(driver_context.db, 'get_network_segments'):
- ctx = driver_context.DvrPortContext(plugin,
- plugin_context,
- port,
- network,
- binding)
+ ctx = driver_context.PortContext(plugin,
+ plugin_context,
+ port,
+ network,
+ binding)
self.assertEqual('foohost', ctx.host)
def test_host_super(self):
binding.host = 'foohost'
with mock.patch.object(driver_context.db, 'get_network_segments'):
- ctx = driver_context.DvrPortContext(plugin,
- plugin_context,
- port,
- network,
- binding)
+ ctx = driver_context.PortContext(plugin,
+ plugin_context,
+ port,
+ network,
+ binding)
self.assertEqual('host', ctx.host)
def test_status(self):
binding.status = 'foostatus'
with mock.patch.object(driver_context.db, 'get_network_segments'):
- ctx = driver_context.DvrPortContext(plugin,
- plugin_context,
- port,
- network,
- binding)
+ ctx = driver_context.PortContext(plugin,
+ plugin_context,
+ port,
+ network,
+ binding)
self.assertEqual('foostatus', ctx.status)
def test_status_super(self):
binding.status = 'foostatus'
with mock.patch.object(driver_context.db, 'get_network_segments'):
- ctx = driver_context.DvrPortContext(plugin,
- plugin_context,
- port,
- network,
- binding)
+ ctx = driver_context.PortContext(plugin,
+ plugin_context,
+ port,
+ network,
+ binding)
self.assertEqual('status', ctx.status)