"device %(device_id)s.")
+class ServicePortInUse(InUse):
+ message = _("Port %(port_id)s cannot be deleted directly via the "
+ "port API: %(reason)s")
+
+
class PortBound(InUse):
message = _("Unable to complete operation on port %(port_id)s, "
"port is already bound, port type: %(vif_type)s, "
# Otherwise it's a stale port that can be removed
fixed_ips = port_db['fixed_ips']
if fixed_ips:
- raise l3.L3PortInUse(port_id=port_id,
- device_owner=port_db['device_owner'])
+ reason = _('has device owner %s') % port_db['device_owner']
+ raise n_exc.ServicePortInUse(port_id=port_db['id'],
+ reason=reason)
else:
LOG.debug("Port %(port_id)s has owner %(port_owner)s, but "
"no IP address, so it can be deleted",
def notify_routers_updated(self, context, router_ids):
super(L3_NAT_db_mixin, self).notify_routers_updated(
context, list(router_ids), 'disassociate_floatingips', {})
+
+
+def _prevent_l3_port_delete_callback(resource, event, trigger, **kwargs):
+ context = kwargs['context']
+ port_id = kwargs['port_id']
+ port_check = kwargs['port_check']
+ l3plugin = manager.NeutronManager.get_service_plugins().get(
+ constants.L3_ROUTER_NAT)
+ if l3plugin and port_check:
+ l3plugin.prevent_l3_port_deletion(context, port_id)
+
+
+def _notify_routers_callback(resource, event, trigger, **kwargs):
+ context = kwargs['context']
+ router_ids = kwargs['router_ids']
+ l3plugin = manager.NeutronManager.get_service_plugins().get(
+ constants.L3_ROUTER_NAT)
+ l3plugin.notify_routers_updated(context, router_ids)
+
+
+def subscribe():
+ registry.subscribe(
+ _prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE)
+ registry.subscribe(
+ _notify_routers_callback, resources.PORT, events.AFTER_DELETE)
+
+# NOTE(armax): multiple l3 service plugins (potentially out of tree) inherit
+# from l3_db and may need the callbacks to be processed. Having an implicit
+# subscription (through the module import) preserves the existing behavior,
+# and at the same time it avoids having to fix each and every l3 plugin out
+# there manually. That said, the subscription is also made explicit in the
+# reference l3 plugin. The subscription operation is idempotent, so there is
+# no harm in registering the same callback multiple times.
+subscribe()
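For illustration, a minimal standalone sketch of the veto mechanism this subscription relies on, assuming only the neutron.callbacks API used above; the _veto_port_delete callback and the fake port id are hypothetical and not part of the change:

    from neutron.callbacks import events
    from neutron.callbacks import exceptions
    from neutron.callbacks import registry
    from neutron.callbacks import resources


    def _veto_port_delete(resource, event, trigger, **kwargs):
        # raising from a BEFORE_DELETE subscriber aborts the port deletion
        raise RuntimeError("port %s is still in use" % kwargs['port_id'])


    registry.subscribe(_veto_port_delete, resources.PORT, events.BEFORE_DELETE)

    try:
        registry.notify(resources.PORT, events.BEFORE_DELETE, None,
                        context=None, port_id='fake-port', port_check=True)
    except exceptions.CallbackFailure as e:
        # each entry in e.errors wraps the subscriber's exception as .error
        print(e.errors[0].error)

The ml2 plugin's _pre_delete_port below catches this CallbackFailure and, when there is a single error, re-raises the subscriber's original exception, which preserves the old prevent_l3_port_deletion behavior.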
l3plugin.dvr_update_router_addvm(context, port)
+def _notify_port_delete(resource, event, trigger, **kwargs):
+ context = kwargs['context']
+ port = kwargs['port']
+ removed_routers = kwargs['removed_routers']
+ l3plugin = manager.NeutronManager.get_service_plugins().get(
+ service_constants.L3_ROUTER_NAT)
+ l3plugin.dvr_vmarp_table_update(context, port, "del")
+ for router in removed_routers:
+ l3plugin.remove_router_from_l3_agent(
+ context, router['agent_id'], router['router_id'])
+
+
def subscribe():
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
+ registry.subscribe(
+ _notify_port_delete, resources.PORT, events.AFTER_DELETE)
"has a floating IP on external network %(net_id)s.")
-class L3PortInUse(nexception.InUse):
- message = _("Port %(port_id)s has owner %(device_owner)s and therefore"
- " cannot be deleted directly via the port API.")
-
-
class RouterExternalGatewayInUseByFloatingIp(nexception.InUse):
message = _("Gateway cannot be updated for router %(router_id)s, since a "
"gateway to external network %(net_id)s is required by one or "
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.api.v2 import attributes
from neutron.callbacks import events
+from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as const
self._process_dvr_port_binding(mech_context, context, attrs)
self._bind_port_if_needed(mech_context)
+ def _pre_delete_port(self, context, port_id, port_check):
+ """Do some preliminary operations before deleting the port."""
+ LOG.debug("Deleting port %s", port_id)
+ try:
+ # notify interested parties of imminent port deletion;
+ # a failure here prevents the operation from happening
+ kwargs = {
+ 'context': context,
+ 'port_id': port_id,
+ 'port_check': port_check
+ }
+ registry.notify(
+ resources.PORT, events.BEFORE_DELETE, self, **kwargs)
+ except exceptions.CallbackFailure as e:
+ # NOTE(armax): preserve old check's behavior
+ if len(e.errors) == 1:
+ raise e.errors[0].error
+ raise exc.ServicePortInUse(port_id=port_id, reason=e)
+
def delete_port(self, context, id, l3_port_check=True):
- LOG.debug("Deleting port %s", id)
+ self._pre_delete_port(context, id, l3_port_check)
+ # TODO(armax): get rid of the l3 dependency in the with block
removed_routers = []
+ router_ids = []
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
is_dvr_enabled = utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
- if l3plugin and l3_port_check:
- l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
{"port_id": id, "owner": device_owner})
super(Ml2Plugin, self).delete_port(context, id)
- # now that we've left db transaction, we are safe to notify
- if l3plugin:
- if is_dvr_enabled:
- l3plugin.dvr_vmarp_table_update(context, port, "del")
- l3plugin.notify_routers_updated(context, router_ids)
- for router in removed_routers:
- l3plugin.remove_router_from_l3_agent(
- context, router['agent_id'], router['router_id'])
+ self._post_delete_port(
+ context, port, router_ids, removed_routers, bound_mech_contexts)
+
+ def _post_delete_port(
+ self, context, port, router_ids, removed_routers, bound_mech_contexts):
+ kwargs = {
+ 'context': context,
+ 'port': port,
+ 'router_ids': router_ids,
+ 'removed_routers': removed_routers
+ }
+ registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs)
try:
# Note that DVR Interface ports will have bindings on
# multiple hosts, and so will have multiple mech_contexts,
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for"
- " port %s"), id)
- self.notifier.port_delete(context, id)
+ " port %s"), port['id'])
+ self.notifier.port_delete(context, port['id'])
self.notify_security_groups_member_updated(context, port)
def get_bound_port_context(self, plugin_context, port_id, host=None):
from neutron.common import topics
from neutron.db import common_db_mixin
from neutron.db import extraroute_db
+from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_hamode_db
super(L3RouterPlugin, self).__init__()
if 'dvr' in self.supported_extension_aliases:
l3_dvrscheduler_db.subscribe()
+ l3_db.subscribe()
def setup_rpc(self):
# RPC support
import mock
from neutron.common import constants as l3_const
+from neutron.common import exceptions
from neutron import context
from neutron.db import l3_dvr_db
from neutron.extensions import l3
plugin = mock.Mock()
gp.return_value = plugin
plugin._get_port.return_value = port
- self.assertRaises(l3.L3PortInUse,
+ self.assertRaises(exceptions.ServicePortInUse,
self.mixin.prevent_l3_port_deletion,
self.ctx,
port['id'])
from oslo_db import exception as db_exc
+from neutron.callbacks import registry
from neutron.common import constants
from neutron.common import exceptions as exc
from neutron.common import utils
with contextlib.nested(
self.port(),
mock.patch.object(l3plugin, 'disassociate_floatingips'),
- mock.patch.object(l3plugin, 'notify_routers_updated')
+ mock.patch.object(registry, 'notify')
) as (port, disassociate_floatingips, notify):
port_id = port['port']['id']
])
# check that notifier was still triggered
- notify.assert_has_calls([
- mock.call(ctx, disassociate_floatingips.return_value)
- ])
+ self.assertTrue(notify.call_count)
def test_check_if_compute_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced('compute:None'))
'get_service_plugins',
return_value=self.service_plugins),
self.port(device_owner=device_owner),
- mock.patch.object(self.l3plugin, 'notify_routers_updated'),
+ mock.patch.object(registry, 'notify'),
mock.patch.object(self.l3plugin, 'disassociate_floatingips',
return_value=fip_set),
mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port',
return_value=[ns_to_delete]),
- mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
) as (get_service_plugin, port, notify, disassociate_floatingips,
- dvr_delns_ifno_port, remove_router_from_l3_agent):
+ dvr_delns_ifno_port):
port_id = port['port']['id']
self.plugin.delete_port(self.context, port_id)
- notify.assert_has_calls([mock.call(self.context, fip_set)])
+ self.assertTrue(notify.call_count)
dvr_delns_ifno_port.assert_called_once_with(self.context,
port['port']['id'])
- remove_router_from_l3_agent.assert_has_calls([
- mock.call(self.context, ns_to_delete['agent_id'],
- ns_to_delete['router_id'])
- ])
def test_delete_last_vm_port(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None')
self._test_delete_dvr_serviced_port(device_owner='compute:None',
floating_ip=True)
- def test_delete_vm_port_namespace_already_deleted(self):
- ns_to_delete = {'host': 'myhost',
- 'agent_id': 'vm_l3_agent',
- 'router_id': 'my_router'}
-
- with contextlib.nested(
- mock.patch.object(manager.NeutronManager,
- 'get_service_plugins',
- return_value=self.service_plugins),
- self.port(device_owner='compute:None'),
- mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port',
- return_value=[ns_to_delete]),
- mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
- ) as (get_service_plugin, port, dvr_delns_ifno_port,
- remove_router_from_l3_agent):
-
- self.plugin.delete_port(self.context, port['port']['id'])
- remove_router_from_l3_agent.assert_called_once_with(self.context,
- ns_to_delete['agent_id'], ns_to_delete['router_id'])
-
def test_delete_lbaas_vip_port(self):
self._test_delete_dvr_serviced_port(
device_owner=constants.DEVICE_OWNER_LOADBALANCER)
self.notify.assert_called_once_with('port', 'after_update',
plugin, **kwargs)
- def test_vmarp_table_update_outside_of_delete_transaction(self):
+ def test_notify_outside_of_delete_transaction(self):
+ self.notify.side_effect = (
+ lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
l3plugin = mock.Mock()
- l3plugin.dvr_vmarp_table_update = (
- lambda *args, **kwargs: self._ensure_transaction_is_closed())
- l3plugin.dvr_deletens_if_no_port.return_value = []
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
return_value={'L3_ROUTER_NAT': l3plugin}),
):
plugin = self._create_plugin_for_create_update_port(mock.Mock())
- # deleting the port will call dvr_vmarp_table_update, which will
+ # deleting the port will call registry.notify, which will
# run the transaction balancing function defined in this test
plugin.delete_port(self.context, 'fake_id')
+ self.assertTrue(self.notify.call_count)
self.adminContext = q_context.get_admin_context()
self.dut = L3DvrScheduler()
+ def test__notify_port_delete(self):
+ plugin = manager.NeutronManager.get_plugin()
+ l3plugin = mock.Mock()
+ l3plugin.supported_extension_aliases = [
+ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
+ constants.L3_DISTRIBUTED_EXT_ALIAS
+ ]
+ with mock.patch.object(manager.NeutronManager,
+ 'get_service_plugins',
+ return_value={'L3_ROUTER_NAT': l3plugin}):
+ kwargs = {
+ 'context': self.adminContext,
+ 'port': mock.ANY,
+ 'removed_routers': [
+ {'agent_id': 'foo_agent', 'router_id': 'foo_id'},
+ ],
+ }
+ l3_dvrscheduler_db._notify_port_delete(
+ 'port', 'after_delete', plugin, **kwargs)
+ l3plugin.dvr_vmarp_table_update.assert_called_once_with(
+ self.adminContext, mock.ANY, 'del')
+ l3plugin.remove_router_from_l3_agent.assert_called_once_with(
+ self.adminContext, 'foo_agent', 'foo_id')
+
def test_dvr_update_router_addvm(self):
port = {
'device_id': 'abcd',