]> review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Delete DVR namespaces on node after removing last VM
authorStephen Ma <stephen.ma@hp.com>
Wed, 6 Aug 2014 22:33:32 +0000 (22:33 +0000)
committerStephen Ma <stephen.ma@hp.com>
Wed, 13 Aug 2014 22:56:38 +0000 (22:56 +0000)
After removing the last VM using a distributed router,
the router's namespaces are still present on the VM host.
The problem is that the neutron API server sent the router
remove notification to the L3 agent using the name of the
host running the L3 agent instead of the agent's UUID. This
caused an error when sending the notification, so the L3
agent never had the chance to clean up the namespace.
This problem is fixed here.

Afterwards, it was found that the notification was still not
sent. The reason is that the router/L3-agent binding has
already been deleted before the routine that sends the
router removed notification was called. The notifier routine
errored out when it tried to delete the same router/L3 agent
binding. This problem is fixed in this patch by removing the
binding removal step from the DVR scheduler.

Change-Id: I6323d7ff438bb6c31e4a794bd3da96bf132fdc85
Closes-Bug: 1353165

neutron/db/l3_dvrscheduler_db.py
neutron/plugins/ml2/plugin.py
neutron/tests/unit/ml2/test_ml2_plugin.py

index 5f23cee959373df854661700488d635fb27df23b..41fb244846de3385047f301efbb2bdaa649e20e6 100644 (file)
@@ -112,19 +112,6 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
                 return True
         return False
 
-    def delete_namespace_on_host(self, context, host, router_id):
-        """Delete the given router namespace on the host."""
-        agent = self._get_agent_by_type_and_host(
-            context, q_const.AGENT_TYPE_L3, host)
-        agent_id = str(agent.id)
-        with context.session.begin(subtransactions=True):
-            (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
-             filter_by(router_id=router_id, l3_agent_id=agent_id).
-             delete(synchronize_session=False))
-        LOG.debug('Deleted router %(router_id)s on agent.id %(id)s',
-                  {'router_id': router_id,
-                   'id': agent.id})
-
     def dvr_deletens_if_no_vm(self, context, port_id):
         """Delete the DVR namespace if no VM exists."""
         router_ids = self.get_dvr_routers_by_vmportid(context, port_id)
@@ -162,11 +149,14 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
                     # unbind this port from router
                     dvr_binding['router_id'] = None
                     dvr_binding.update(dvr_binding)
-            self.delete_namespace_on_host(context, port_host, router_id)
-            info = {'router_id': router_id, 'host': port_host}
+            agent = self._get_agent_by_type_and_host(context,
+                                                     q_const.AGENT_TYPE_L3,
+                                                     port_host)
+            info = {'router_id': router_id, 'host': port_host,
+                    'agent_id': str(agent.id)}
             removed_router_info.append(info)
-            LOG.debug('Deleted router namespace %(router_id)s '
-                      'on host %(host)s', info)
+            LOG.debug('Router namespace %(router_id)s on host %(host)s '
+                      'to be deleted', info)
         return removed_router_info
 
     def bind_snat_router(self, context, router_id, chosen_agent):
index f92b60fa5bf2b7c3fe0c82b9f9d627c6bdf2de13..c29122eaf17cd5329d504df0ce4c8fba61126164 100644 (file)
@@ -1026,7 +1026,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             l3plugin.notify_routers_updated(context, router_ids)
             for router in removed_routers:
                 l3plugin.remove_router_from_l3_agent(
-                    context, router['host'], router['router_id'])
+                    context, router['agent_id'], router['router_id'])
 
         try:
             # for both normal and DVR Interface ports, only one invocation of
index ac45cb79d824189988c8c3e65129138fc6d6c55f..6cbc858013c927b1d1d992e7086bb650e7dbfbfb 100644 (file)
@@ -19,6 +19,7 @@ import testtools
 import uuid
 import webob
 
+from neutron.common import constants
 from neutron.common import exceptions as exc
 from neutron import context
 from neutron.extensions import multiprovidernet as mpnet
@@ -174,6 +175,75 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
             self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
 
 
+class TestMl2DvrPortsV2(TestMl2PortsV2):
+    def setUp(self):
+        super(TestMl2DvrPortsV2, self).setUp()
+        extensions = ['router',
+                      constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
+                      constants.L3_DISTRIBUTED_EXT_ALIAS]
+        self.plugin = manager.NeutronManager.get_plugin()
+        self.l3plugin = mock.Mock()
+        type(self.l3plugin).supported_extension_aliases = (
+            mock.PropertyMock(return_value=extensions))
+        self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}
+
+    def test_delete_last_vm_port(self):
+        fip_set = set()
+        ns_to_delete = {'host': 'vmhost', 'agent_id': 'vm_l3_agent',
+                        'router_id': 'my_router'}
+
+        with contextlib.nested(
+            mock.patch.object(manager.NeutronManager,
+                              'get_service_plugins',
+                              return_value=self.service_plugins),
+            self.port(do_delete=False, device_owner='compute:None'),
+            mock.patch.object(self.l3plugin, 'notify_routers_updated'),
+            mock.patch.object(self.l3plugin, 'disassociate_floatingips',
+                              return_value=fip_set),
+            mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_vm',
+                              return_value=[ns_to_delete]),
+            mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
+        ) as (get_service_plugin, port, notify, disassociate_floatingips,
+              ddinv, remove_router_from_l3_agent):
+
+            port_id = port['port']['id']
+            self.plugin.delete_port(self.context, port_id)
+
+            notify.assert_has_calls([mock.call(self.context, fip_set)])
+            remove_router_from_l3_agent.assert_has_calls([
+                mock.call(self.context, ns_to_delete['agent_id'],
+                          ns_to_delete['router_id'])
+            ])
+
+    def test_delete_last_vm_port_with_floatingip(self):
+        ns_to_delete = {'host': 'vmhost', 'agent_id': 'vm_l3_agent',
+                        'router_id': 'my_router'}
+        fip_set = set([ns_to_delete['router_id']])
+
+        with contextlib.nested(
+            mock.patch.object(manager.NeutronManager,
+                              'get_service_plugins',
+                              return_value=self.service_plugins),
+            self.port(do_delete=False, device_owner='compute:None'),
+            mock.patch.object(self.l3plugin, 'notify_routers_updated'),
+            mock.patch.object(self.l3plugin, 'disassociate_floatingips',
+                              return_value=fip_set),
+            mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_vm',
+                              return_value=[ns_to_delete]),
+            mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
+        ) as (get_service_plugins, port, notify, disassociate_floatingips,
+              ddinv, remove_router_from_l3_agent):
+
+            port_id = port['port']['id']
+            self.plugin.delete_port(self.context, port_id)
+
+            notify.assert_has_calls([mock.call(self.context, fip_set)])
+            remove_router_from_l3_agent.assert_has_calls([
+                mock.call(self.context, ns_to_delete['agent_id'],
+                          ns_to_delete['router_id'])
+            ])
+
+
 class TestMl2PortBinding(Ml2PluginV2TestCase,
                          test_bindings.PortBindingsTestCase):
     # Test case does not set binding:host_id, so ml2 does not attempt