review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Fix DVR to service LBaaS VIP Ports
author: Swaminathan Vasudevan <swaminathan.vasudevan@hp.com>
Thu, 14 Aug 2014 06:38:56 +0000 (23:38 -0700)
committer: Vivekanandan Narasimhan <vivekanandan.narasimhan@hp.com>
Tue, 26 Aug 2014 13:43:12 +0000 (06:43 -0700)
Currently, DVR router namespaces are created only
when there is a valid VM port on the compute
node, or for the gateway-port on the service node.
But when an LBaaS VIP port is created the l3 agent
does not create a DVR namespace to service the VIP port.
This fix enables DVR namespaces to be created to
service the LBaaS VIP port.

Also, this fix enables L2 Agent running in DVR
mode, to add-in OVS rules to enable packets to
be routed to such LBaaS VIP Ports which are
resident on DVR routed interfaces.

Therefore, with this fix both East-West and
North-South traffic will be serviced by DVR
for LBaaS VIP Ports.

DocImpact

Authored-by: Swaminathan Vasudevan <swaminathan.vasudevan@hp.com>
Co-Authored-By: Vivekanandan Narasimhan <vivekanandan.narasimhan@hp.com>
Change-Id: I698b971d50721fb0512a11569f7d3139d0d456f3
Closes-Bug: #1356464

13 files changed:
neutron/api/rpc/handlers/dvr_rpc.py
neutron/common/constants.py
neutron/common/utils.py
neutron/db/dvr_mac_db.py
neutron/db/l3_agentschedulers_db.py
neutron/db/l3_dvr_db.py
neutron/db/l3_dvrscheduler_db.py
neutron/plugins/ml2/plugin.py
neutron/plugins/ml2/rpc.py
neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py
neutron/tests/unit/ml2/test_ml2_plugin.py
neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py
neutron/tests/unit/test_l3_schedulers.py

index a555c91b2f34b678e04eabb9ce3369864bea95bc..5ac21015130a2b78907e16c71f85b66c56fdfc2e 100644 (file)
@@ -40,11 +40,12 @@ class DVRServerRpcApiMixin(object):
                          version=self.DVR_RPC_VERSION)
 
     @log.log
-    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
+    def get_ports_on_host_by_subnet(self, context, host, subnet):
         return self.call(context,
-                         self.make_msg('get_compute_ports_on_host_by_subnet',
-                                       host=host,
-                                       subnet=subnet),
+                         self.make_msg(
+                             'get_ports_on_host_by_subnet',
+                             host=host,
+                             subnet=subnet),
                          version=self.DVR_RPC_VERSION)
 
     @log.log
@@ -70,10 +71,9 @@ class DVRServerRpcCallbackMixin(object):
     def get_dvr_mac_address_by_host(self, context, host):
         return self.plugin.get_dvr_mac_address_by_host(context, host)
 
-    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
-        return self.plugin.get_compute_ports_on_host_by_subnet(context,
-                                                               host,
-                                                               subnet)
+    def get_ports_on_host_by_subnet(self, context, host, subnet):
+        return self.plugin.get_ports_on_host_by_subnet(context,
+            host, subnet)
 
     def get_subnet_for_dvr(self, context, subnet):
         return self.plugin.get_subnet_for_dvr(context, subnet)
index e1c61a8bc7d4bda7b4e213139fa34b39527598d9..f1c15c535b618c26a18b5b31f50df51bc4da792c 100644 (file)
@@ -36,6 +36,7 @@ DEVICE_OWNER_DHCP = "network:dhcp"
 DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed"
 DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway"
 DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"
+DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER"
 
 DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"
 
index 5a3a6a64fc3de763f4151e2f54fb9ae8a4c24dd1..006e3b7dc89e03a819d4304efc6cfd3aa94182ee 100644 (file)
@@ -335,3 +335,16 @@ class exception_logger(object):
                 with excutils.save_and_reraise_exception():
                     self.logger(e)
         return call
+
+
+def is_dvr_serviced(device_owner):
+        """Check if the port need to be serviced by DVR
+
+        Helper function to check the device owners of the
+        ports in the compute and service node to make sure
+        if they are required for DVR or any service directly or
+        indirectly associated with DVR.
+        """
+        if (device_owner.startswith('compute:') or (
+            q_const.DEVICE_OWNER_LOADBALANCER == device_owner)):
+            return True
index c590b3ac204c4bb7214a07dbf66d16fc4ed65d96..a0a2740e901cf51678eb64034c67f451cf7f5b7d 100644 (file)
@@ -22,6 +22,7 @@ from neutron.common import log
 from neutron.common import utils
 from neutron.db import model_base
 from neutron.extensions import dvr as ext_dvr
+from neutron.extensions import portbindings
 from neutron import manager
 from neutron.openstack.common import log as logging
 from oslo.config import cfg
@@ -121,24 +122,35 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
                 'mac_address': dvr_mac_entry['mac_address']}
 
     @log.log
-    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
+    def get_ports_on_host_by_subnet(self, context, host, subnet):
+        """Returns ports of interest, on a given subnet in the input host
+
+        This method returns ports that need to be serviced by DVR.
+        :param context: rpc request context
+        :param host: host id to match and extract ports of interest
+        :param subnet: subnet id to match and extract ports of interest
+        :returns list -- Ports on the given subnet in the input host
+        """
         # FIXME(vivek, salv-orlando): improve this query by adding the
         # capability of filtering by binding:host_id
-        vm_ports_by_host = []
+        ports_by_host = []
         filter = {'fixed_ips': {'subnet_id': [subnet]}}
         ports = self.plugin.get_ports(context, filters=filter)
-        LOG.debug("List of Ports on subnet %(subnet)s received as %(ports)s",
-                  {'subnet': subnet, 'ports': ports})
+        LOG.debug("List of Ports on subnet %(subnet)s at host %(host)s "
+                  "received as %(ports)s",
+                  {'subnet': subnet, 'host': host, 'ports': ports})
         for port in ports:
-            if 'compute:' in port['device_owner']:
-                if port['binding:host_id'] == host:
-                    port_dict = self.plugin._make_port_dict(
-                        port, process_extensions=False)
-                    vm_ports_by_host.append(port_dict)
-        LOG.debug("Returning list of VM Ports on host %(host)s for subnet "
-                  "%(subnet)s ports %(ports)s",
-                  {'host': host, 'subnet': subnet, 'ports': vm_ports_by_host})
-        return vm_ports_by_host
+            device_owner = port['device_owner']
+            if (utils.is_dvr_serviced(device_owner)):
+                if port[portbindings.HOST_ID] == host:
+                    port_dict = self.plugin._make_port_dict(port,
+                        process_extensions=False)
+                    ports_by_host.append(port_dict)
+        LOG.debug("Returning list of dvr serviced ports on host %(host)s"
+                  " for subnet %(subnet)s ports %(ports)s",
+                  {'host': host, 'subnet': subnet,
+                   'ports': ports_by_host})
+        return ports_by_host
 
     @log.log
     def get_subnet_for_dvr(self, context, subnet):
index 8dd2739cb9ce7b004f57d9bde19572385515b73a..de2de1609954c19aef5cbc459a600ed8be516c47 100644 (file)
@@ -24,6 +24,7 @@ from sqlalchemy.orm import exc
 from sqlalchemy.orm import joinedload
 
 from neutron.common import constants
+from neutron.common import utils as n_utils
 from neutron import context as n_ctx
 from neutron.db import agents_db
 from neutron.db import agentschedulers_db
@@ -320,8 +321,12 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                 if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
                     active, l3_agent)]
 
-    def check_vmexists_on_l3agent(self, context, l3_agent, router_id,
-                                  subnet_id):
+    def check_ports_exist_on_l3agent(self, context, l3_agent, router_id,
+                                     subnet_id):
+        """
+        This function checks for existence of dvr serviceable
+        ports on the host, running the input l3agent.
+        """
         if not subnet_id:
             return True
 
@@ -329,7 +334,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         filter = {'fixed_ips': {'subnet_id': [subnet_id]}}
         ports = core_plugin.get_ports(context, filters=filter)
         for port in ports:
-            if ("compute:" in port['device_owner'] and
+            if (n_utils.is_dvr_serviced(port['device_owner']) and
                 l3_agent['host'] == port['binding:host_id']):
                     return True
 
@@ -397,7 +402,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                 not is_router_distributed):
                 candidates.append(l3_agent)
             elif is_router_distributed and agent_mode.startswith('dvr') and (
-                self.check_vmexists_on_l3agent(
+                self.check_ports_exist_on_l3agent(
                     context, l3_agent, sync_router['id'], subnet_id)):
                 candidates.append(l3_agent)
         return candidates
index 75b8ab356aff98377e154d553b3b94337ec92d89..6a91c0fe068cb7e27df5c27816b6fb0c4dcbb8f0 100644 (file)
@@ -17,6 +17,7 @@ from oslo.config import cfg
 from neutron.api.v2 import attributes
 from neutron.common import constants as l3_const
 from neutron.common import exceptions as n_exc
+from neutron.common import utils as n_utils
 from neutron.db import l3_attrs_db
 from neutron.db import l3_db
 from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
@@ -333,10 +334,9 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
     def get_vm_port_hostid(self, context, port_id, port=None):
         """Return the portbinding host_id."""
         vm_port_db = port or self._core_plugin.get_port(context, port_id)
-        allowed_device_owners = ("neutron:LOADBALANCER", DEVICE_OWNER_AGENT_GW)
         device_owner = vm_port_db['device_owner'] if vm_port_db else ""
-        if (device_owner in allowed_device_owners or
-            device_owner.startswith("compute:")):
+        if (n_utils.is_dvr_serviced(device_owner) or
+            device_owner == DEVICE_OWNER_AGENT_GW):
             return vm_port_db[portbindings.HOST_ID]
 
     def get_agent_gw_ports_exist_for_network(
index 78fe3f0d053c4fd6ce18687490fd8aa9668e30c0..9c632b4ec6a075ba67a9c0f685980b7c8a54cb94 100644 (file)
@@ -20,6 +20,7 @@ from sqlalchemy import orm
 from sqlalchemy.orm import exc
 
 from neutron.common import constants as q_const
+from neutron.common import utils as n_utils
 from neutron.db import agents_db
 from neutron.db import l3_agentschedulers_db as l3agent_sch_db
 from neutron.db import model_base
@@ -135,17 +136,18 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
             subnet_ids.add(int_subnet)
         return subnet_ids
 
-    def check_vm_exists_on_subnet(self, context, host, port_id, subnet_id):
-        """Check if there is any vm exists on the subnet_id."""
+    def check_ports_active_on_host_and_subnet(self, context, host,
+                                         port_id, subnet_id):
+        """Check if there is any dvr serviceable port on the subnet_id."""
         filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
         ports = self._core_plugin.get_ports(context, filters=filter_sub)
         for port in ports:
-            if ("compute:" in port['device_owner']
+            if (n_utils.is_dvr_serviced(port['device_owner'])
                 and port['status'] == 'ACTIVE'
                 and port['binding:host_id'] == host
                 and port['id'] != port_id):
-                LOG.debug('DVR: VM exists for subnet %(subnet_id)s on host '
-                          '%(host)s', {'subnet_id': subnet_id,
+                LOG.debug('DVR: Active port exists for subnet %(subnet_id)s '
+                          'on host %(host)s', {'subnet_id': subnet_id,
                                        'host': host})
                 return True
         return False
@@ -164,10 +166,10 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
             subnet_ids = self.get_subnet_ids_on_router(context, router_id)
             vm_exists_on_subnet = False
             for subnet in subnet_ids:
-                if self.check_vm_exists_on_subnet(context,
-                                                  port_host,
-                                                  port_id,
-                                                  subnet):
+                if self.check_ports_active_on_host_and_subnet(context,
+                                                              port_host,
+                                                              port_id,
+                                                              subnet):
                     vm_exists_on_subnet = True
                     break
 
index dae053c9273227f42071696f8e52c8d9d5cf0455..6ab9f246f4cb5fa86f7591fc62e79b4cf8fca1f9 100644 (file)
@@ -155,7 +155,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             binding.host != host):
             binding.host = host
             changes = True
-            if "compute:" in port['device_owner']:
+            # Whenever a DVR serviceable port comes up on a
+            # node, it has to be communicated to the L3 Plugin
+            # and agent for creating the respective namespaces.
+            if (utils.is_dvr_serviced(port['device_owner'])):
                 l3plugin = manager.NeutronManager.get_service_plugins().get(
                     service_constants.L3_ROUTER_NAT)
                 if (utils.is_extension_supported(
index 0befbebbbca0138fba53fe269b3e3d587b35415c..1da6084a002ab7cdaa666e3f78ef060b2a8b8f06 100644 (file)
@@ -207,11 +207,11 @@ class RpcCallbacks(n_rpc.RpcCallback,
         return super(RpcCallbacks, self).get_dvr_mac_address_by_host(
             rpc_context, host)
 
-    def get_compute_ports_on_host_by_subnet(self, rpc_context, **kwargs):
+    def get_ports_on_host_by_subnet(self, rpc_context, **kwargs):
         host = kwargs.get('host')
         subnet = kwargs.get('subnet')
         LOG.debug("DVR Agent requests list of VM ports on host %s", host)
-        return super(RpcCallbacks, self).get_compute_ports_on_host_by_subnet(
+        return super(RpcCallbacks, self).get_ports_on_host_by_subnet(
             rpc_context, host, subnet)
 
     def get_subnet_for_dvr(self, rpc_context, **kwargs):
index cb0a36d8ad448f3a8be7d2937518176778afadd5..dcb9f77a44a957c25716353c80da44410391ffaf 100644 (file)
@@ -17,6 +17,7 @@
 
 from neutron.api.rpc.handlers import dvr_rpc
 from neutron.common import constants as n_const
+from neutron.common import utils as n_utils
 from neutron.openstack.common import log as logging
 from neutron.plugins.openvswitch.common import constants
 
@@ -310,10 +311,10 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
         subnet_info = ldm.get_subnet_info()
         ip_subnet = subnet_info['cidr']
         local_compute_ports = (
-            self.plugin_rpc.get_compute_ports_on_host_by_subnet(
+            self.plugin_rpc.get_ports_on_host_by_subnet(
                 self.context, self.host, subnet_uuid))
         LOG.debug("DVR: List of ports received from "
-                  "get_compute_ports_on_host_by_subnet %s",
+                  "get_ports_on_host_by_subnet %s",
                   local_compute_ports)
         for prt in local_compute_ports:
             vif = self.int_br.get_vif_port_by_id(prt['id'])
@@ -389,8 +390,8 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
         ovsport.add_subnet(subnet_uuid)
         self.local_ports[port.vif_id] = ovsport
 
-    def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips,
-                                         device_owner, local_vlan):
+    def _bind_port_on_dvr_subnet(self, port, fixed_ips,
+                                 device_owner, local_vlan):
         # Handle new compute port added use-case
         subnet_uuid = None
         for ips in fixed_ips:
@@ -517,10 +518,10 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
                                                          device_owner,
                                                          local_vlan_id)
 
-        if device_owner and device_owner.startswith('compute:'):
-            self._bind_compute_port_on_dvr_subnet(port, fixed_ips,
-                                                  device_owner,
-                                                  local_vlan_id)
+        if device_owner and n_utils.is_dvr_serviced(device_owner):
+            self._bind_port_on_dvr_subnet(port, fixed_ips,
+                                          device_owner,
+                                          local_vlan_id)
 
         if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
             self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips,
@@ -593,7 +594,7 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
         # release port state
         self.local_ports.pop(port.vif_id, None)
 
-    def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan):
+    def _unbind_port_on_dvr_subnet(self, port, local_vlan):
 
         ovsport = self.local_ports[port.vif_id]
         # This confirms that this compute port being removed belonged
@@ -710,9 +711,8 @@ class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
             self._unbind_distributed_router_interface_port(vif_port,
                                                            local_vlan_id)
 
-        if device_owner and device_owner.startswith('compute:'):
-            self._unbind_compute_port_on_dvr_subnet(vif_port,
-                                                    local_vlan_id)
+        if device_owner and n_utils.is_dvr_serviced(device_owner):
+            self._unbind_port_on_dvr_subnet(vif_port, local_vlan_id)
 
         if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
             self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
index 5e2d261ae48be623b3ac2430baa098208c5cf7cc..b01aab0b97b58d3f4a79d9f0c9c24745342a4a0d 100644 (file)
@@ -21,6 +21,7 @@ import webob
 
 from neutron.common import constants
 from neutron.common import exceptions as exc
+from neutron.common import utils
 from neutron import context
 from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import portbindings
@@ -163,6 +164,17 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
                 mock.call(ctx, disassociate_floatingips.return_value)
             ])
 
+    def test_check_if_compute_port_serviced_by_dvr(self):
+        self.assertTrue(utils.is_dvr_serviced('compute:None'))
+
+    def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
+        self.assertTrue(utils.is_dvr_serviced(
+            constants.DEVICE_OWNER_LOADBALANCER))
+
+    def test_check_if_port_not_serviced_by_dvr(self):
+        self.assertFalse(utils.is_dvr_serviced(
+            constants.DEVICE_OWNER_ROUTER_INTF))
+
     def test_disassociate_floatingips_do_notify_returns_nothing(self):
         ctx = context.get_admin_context()
         l3plugin = manager.NeutronManager.get_service_plugins().get(
index 17da5960da13f956c27927c483d55493b8c2c908..91ed43d9e14b2643a7e7e6405e25155960144264 100644 (file)
@@ -223,8 +223,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                   'cidr': '1.1.1.0/24',
                                   'gateway_mac': 'aa:bb:cc:11:22:33'}),
                 mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_compute_ports_on_host_by_subnet',
-                                  return_value=[]),
+                    'get_ports_on_host_by_subnet',
+                    return_value=[]),
                 mock.patch.object(self.agent.dvr_agent.int_br,
                                   'get_vif_port_by_id',
                                   return_value=self._port),
@@ -243,7 +243,7 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                 self.assertTrue(add_flow_tun_fn.called)
                 self.assertTrue(delete_flows_int_fn.called)
 
-    def test_port_bound_for_dvr_with_compute_ports(self, ofport=10):
+    def _test_port_bound_for_dvr(self, device_owner):
         self._setup_for_dvr_test()
         with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                         'set_db_attribute',
@@ -259,8 +259,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                       'cidr': '1.1.1.0/24',
                                       'gateway_mac': 'aa:bb:cc:11:22:33'}),
                 mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_compute_ports_on_host_by_subnet',
-                                  return_value=[]),
+                    'get_ports_on_host_by_subnet',
+                    return_value=[]),
                 mock.patch.object(self.agent.dvr_agent.int_br,
                                   'get_vif_port_by_id',
                                   return_value=self._port),
@@ -279,11 +279,18 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                 self.agent.port_bound(self._compute_port, self._net_uuid,
                                       'vxlan', None, None,
                                       self._compute_fixed_ips,
-                                      "compute:None", False)
+                                      device_owner, False)
                 self.assertTrue(add_flow_tun_fn.called)
                 self.assertTrue(add_flow_int_fn.called)
                 self.assertTrue(delete_flows_int_fn.called)
 
+    def test_port_bound_for_dvr_with_compute_ports(self):
+        self._test_port_bound_for_dvr(device_owner="compute:None")
+
+    def test_port_bound_for_dvr_with_lbaas_vip_ports(self):
+        self._test_port_bound_for_dvr(
+            device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
+
     def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10):
         self._setup_for_dvr_test()
         with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
@@ -299,8 +306,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                   'cidr': '1.1.1.0/24',
                                   'gateway_mac': 'aa:bb:cc:11:22:33'}),
                 mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_compute_ports_on_host_by_subnet',
-                                  return_value=[]),
+                    'get_ports_on_host_by_subnet',
+                    return_value=[]),
                 mock.patch.object(self.agent.dvr_agent.int_br,
                                   'get_vif_port_by_id',
                                   return_value=self._port),
@@ -334,8 +341,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                   'cidr': '1.1.1.0/24',
                                   'gateway_mac': 'aa:bb:cc:11:22:33'}),
                 mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_compute_ports_on_host_by_subnet',
-                                  return_value=[]),
+                    'get_ports_on_host_by_subnet',
+                    return_value=[]),
                 mock.patch.object(self.agent.dvr_agent.int_br,
                                   'get_vif_port_by_id',
                                   return_value=self._port),
@@ -368,7 +375,7 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                 self.assertTrue(delete_flows_int_fn.called)
                 self.assertTrue(delete_flows_tun_fn.called)
 
-    def test_treat_devices_removed_for_dvr_with_compute_ports(self, ofport=10):
+    def _test_treat_devices_removed_for_dvr(self, device_owner):
         self._setup_for_dvr_test()
         with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                         'set_db_attribute',
@@ -383,8 +390,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                   'cidr': '1.1.1.0/24',
                                   'gateway_mac': 'aa:bb:cc:11:22:33'}),
                 mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_compute_ports_on_host_by_subnet',
-                                  return_value=[]),
+                    'get_ports_on_host_by_subnet',
+                    return_value=[]),
                 mock.patch.object(self.agent.dvr_agent.int_br,
                                   'get_vif_port_by_id',
                                   return_value=self._port),
@@ -404,7 +411,7 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                       self._net_uuid, 'vxlan',
                                       None, None,
                                       self._compute_fixed_ips,
-                                      "compute:None", False)
+                                      device_owner, False)
                 self.assertTrue(add_flow_tun_fn.called)
                 self.assertTrue(add_flow_int_fn.called)
                 self.assertTrue(delete_flows_int_fn.called)
@@ -420,6 +427,13 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                 self.agent.treat_devices_removed([self._compute_port.vif_id])
                 self.assertTrue(delete_flows_int_fn.called)
 
+    def test_treat_devices_removed_for_dvr_with_compute_ports(self):
+        self._test_treat_devices_removed_for_dvr(device_owner="compute:None")
+
+    def test_treat_devices_removed_for_dvr_with_lbaas_vip_ports(self):
+        self._test_treat_devices_removed_for_dvr(
+            device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
+
     def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10):
         self._setup_for_dvr_test()
         with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
@@ -435,8 +449,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                                   'cidr': '1.1.1.0/24',
                                   'gateway_mac': 'aa:bb:cc:11:22:33'}),
                 mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_compute_ports_on_host_by_subnet',
-                                  return_value=[]),
+                    'get_ports_on_host_by_subnet',
+                    return_value=[]),
                 mock.patch.object(self.agent.dvr_agent.int_br,
                                   'get_vif_port_by_id',
                                   return_value=self._port),
index 86d7c0d0a0004c3df21be407d5ebee4afffd28c7..68e724f1d90e3333563393b245158a3afdf34a23 100644 (file)
@@ -584,7 +584,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase,
             self.assertEqual(sub_ids.pop(),
                             dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
 
-    def test_check_vm_exists_on_subnet(self):
+    def test_check_ports_active_on_host_and_subnet(self):
         dvr_port = {
                 'id': 'dvr_port1',
                 'device_id': 'r1',
@@ -613,12 +613,40 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase,
                        '.L3AgentNotifyAPI')):
             sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                         r1['id'])
-            result = self.dut.check_vm_exists_on_subnet(
+            result = self.dut.check_ports_active_on_host_and_subnet(
                                                     self.adminContext,
                                                     'thisHost', 'dvr_port1',
                                                     sub_ids)
             self.assertFalse(result)
 
+    def test_check_dvr_serviced_port_exists_on_subnet(self):
+        vip_port = {
+                'id': 'lbaas-vip-port1',
+                'device_id': 'vip-pool-id',
+                'status': 'ACTIVE',
+                'binding:host_id': 'thisHost',
+                'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
+                'fixed_ips': [
+                    {
+                        'subnet_id': 'my-subnet-id',
+                        'ip_address': '10.10.10.1'
+                    }
+                ]
+        }
+
+        with contextlib.nested(
+            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
+                       '.get_ports', return_value=[vip_port]),
+            mock.patch('neutron.common.utils.is_dvr_serviced',
+                       return_value=True)) as (get_ports_fn, dvr_serv_fn):
+            result = self.dut.check_ports_active_on_host_and_subnet(
+                                                    self.adminContext,
+                                                    'thisHost',
+                                                    'dvr1-intf-id',
+                                                    'my-subnet-id')
+            self.assertTrue(result)
+            self.assertEqual(dvr_serv_fn.call_count, 1)
+
     def test_schedule_snat_router_with_snat_candidates(self):
         agent = agents_db.Agent()
         agent.admin_state_up = True