version=self.DVR_RPC_VERSION)
@log.log
- def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
+ def get_ports_on_host_by_subnet(self, context, host, subnet):
return self.call(context,
- self.make_msg('get_compute_ports_on_host_by_subnet',
- host=host,
- subnet=subnet),
+ self.make_msg(
+ 'get_ports_on_host_by_subnet',
+ host=host,
+ subnet=subnet),
version=self.DVR_RPC_VERSION)
@log.log
def get_dvr_mac_address_by_host(self, context, host):
return self.plugin.get_dvr_mac_address_by_host(context, host)
- def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
- return self.plugin.get_compute_ports_on_host_by_subnet(context,
- host,
- subnet)
+ def get_ports_on_host_by_subnet(self, context, host, subnet):
+ return self.plugin.get_ports_on_host_by_subnet(context,
+ host, subnet)
def get_subnet_for_dvr(self, context, subnet):
return self.plugin.get_subnet_for_dvr(context, subnet)
DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed"
DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway"
DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"
+DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER"
DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"
with excutils.save_and_reraise_exception():
self.logger(e)
return call
+
+
+def is_dvr_serviced(device_owner):
+    """Check whether a port with this device owner needs DVR servicing.
+
+    Helper used on compute and service nodes to decide whether a port
+    (VM port or a service port such as an LBaaS VIP) is required for
+    DVR, directly or indirectly.
+
+    :param device_owner: the port's device_owner string
+    :returns: bool -- True if the port must be serviced by DVR
+    """
+    # Return an explicit bool instead of falling through to an implicit
+    # None on the negative path, so callers get True/False consistently.
+    return (device_owner.startswith('compute:') or
+            q_const.DEVICE_OWNER_LOADBALANCER == device_owner)
from neutron.common import utils
from neutron.db import model_base
from neutron.extensions import dvr as ext_dvr
+from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import log as logging
from oslo.config import cfg
'mac_address': dvr_mac_entry['mac_address']}
@log.log
-    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
+    def get_ports_on_host_by_subnet(self, context, host, subnet):
+        """Returns ports of interest, on a given subnet in the input host
+
+        This method returns ports that need to be serviced by DVR.
+        :param context: rpc request context
+        :param host: host id to match and extract ports of interest
+        :param subnet: subnet id to match and extract ports of interest
+        :returns list -- Ports on the given subnet in the input host
+        """
         # FIXME(vivek, salv-orlando): improve this query by adding the
         # capability of filtering by binding:host_id
-        vm_ports_by_host = []
+        ports_by_host = []
         filter = {'fixed_ips': {'subnet_id': [subnet]}}
         ports = self.plugin.get_ports(context, filters=filter)
-        LOG.debug("List of Ports on subnet %(subnet)s received as %(ports)s",
-                  {'subnet': subnet, 'ports': ports})
+        LOG.debug("List of Ports on subnet %(subnet)s at host %(host)s "
+                  "received as %(ports)s",
+                  {'subnet': subnet, 'host': host, 'ports': ports})
         for port in ports:
-            if 'compute:' in port['device_owner']:
-                if port['binding:host_id'] == host:
-                    port_dict = self.plugin._make_port_dict(
-                        port, process_extensions=False)
-                    vm_ports_by_host.append(port_dict)
-        LOG.debug("Returning list of VM Ports on host %(host)s for subnet "
-                  "%(subnet)s ports %(ports)s",
-                  {'host': host, 'subnet': subnet, 'ports': vm_ports_by_host})
-        return vm_ports_by_host
+            # A port is of interest only when it both needs DVR servicing
+            # and is bound to the requested host; flatten the two checks
+            # into one condition (no nested ifs, no redundant parens).
+            if (utils.is_dvr_serviced(port['device_owner']) and
+                    port[portbindings.HOST_ID] == host):
+                port_dict = self.plugin._make_port_dict(
+                    port, process_extensions=False)
+                ports_by_host.append(port_dict)
+        LOG.debug("Returning list of dvr serviced ports on host %(host)s"
+                  " for subnet %(subnet)s ports %(ports)s",
+                  {'host': host, 'subnet': subnet,
+                   'ports': ports_by_host})
+        return ports_by_host
@log.log
def get_subnet_for_dvr(self, context, subnet):
from sqlalchemy.orm import joinedload
from neutron.common import constants
+from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
active, l3_agent)]
- def check_vmexists_on_l3agent(self, context, l3_agent, router_id,
- subnet_id):
-    def check_vmexists_on_l3agent(self, context, l3_agent, router_id,
-                                  subnet_id):
+    def check_ports_exist_on_l3agent(self, context, l3_agent, router_id,
+                                     subnet_id):
+        """Check for DVR serviceable ports on the host running the l3 agent.
+
+        Returns True when subnet_id is falsy, or when any port on the
+        subnet is bound to the agent's host.  On no match the function
+        falls through and implicitly returns None -- callers must rely
+        on truthiness only.
+        """
         if not subnet_id:
             return True
         filter = {'fixed_ips': {'subnet_id': [subnet_id]}}
+        # NOTE(review): core_plugin is bound outside this excerpt --
+        # presumably the core plugin from NeutronManager; confirm in the
+        # full file.
         ports = core_plugin.get_ports(context, filters=filter)
         for port in ports:
-            if ("compute:" in port['device_owner'] and
+            if (n_utils.is_dvr_serviced(port['device_owner']) and
                 l3_agent['host'] == port['binding:host_id']):
                 return True
not is_router_distributed):
candidates.append(l3_agent)
elif is_router_distributed and agent_mode.startswith('dvr') and (
- self.check_vmexists_on_l3agent(
+ self.check_ports_exist_on_l3agent(
context, l3_agent, sync_router['id'], subnet_id)):
candidates.append(l3_agent)
return candidates
from neutron.api.v2 import attributes
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
+from neutron.common import utils as n_utils
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
def get_vm_port_hostid(self, context, port_id, port=None):
"""Return the portbinding host_id."""
vm_port_db = port or self._core_plugin.get_port(context, port_id)
- allowed_device_owners = ("neutron:LOADBALANCER", DEVICE_OWNER_AGENT_GW)
device_owner = vm_port_db['device_owner'] if vm_port_db else ""
- if (device_owner in allowed_device_owners or
- device_owner.startswith("compute:")):
+ if (n_utils.is_dvr_serviced(device_owner) or
+ device_owner == DEVICE_OWNER_AGENT_GW):
return vm_port_db[portbindings.HOST_ID]
def get_agent_gw_ports_exist_for_network(
from sqlalchemy.orm import exc
from neutron.common import constants as q_const
+from neutron.common import utils as n_utils
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import model_base
subnet_ids.add(int_subnet)
return subnet_ids
- def check_vm_exists_on_subnet(self, context, host, port_id, subnet_id):
- """Check if there is any vm exists on the subnet_id."""
-    def check_vm_exists_on_subnet(self, context, host, port_id, subnet_id):
-        """Check if there is any vm exists on the subnet_id."""
+    def check_ports_active_on_host_and_subnet(self, context, host,
+                                              port_id, subnet_id):
+        """Check for another ACTIVE dvr serviceable port on subnet_id.
+
+        Returns True only if some port on the subnet is DVR serviceable,
+        is ACTIVE, is bound to the given host, and is not port_id itself
+        (the port currently being processed is excluded).
+        """
         filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
         ports = self._core_plugin.get_ports(context, filters=filter_sub)
         for port in ports:
-            if ("compute:" in port['device_owner']
+            if (n_utils.is_dvr_serviced(port['device_owner'])
                 and port['status'] == 'ACTIVE'
                 and port['binding:host_id'] == host
                 and port['id'] != port_id):
-                LOG.debug('DVR: VM exists for subnet %(subnet_id)s on host '
-                          '%(host)s', {'subnet_id': subnet_id,
+                LOG.debug('DVR: Active port exists for subnet %(subnet_id)s '
+                          'on host %(host)s', {'subnet_id': subnet_id,
                           'host': host})
                 return True
         return False
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
vm_exists_on_subnet = False
for subnet in subnet_ids:
- if self.check_vm_exists_on_subnet(context,
- port_host,
- port_id,
- subnet):
+ if self.check_ports_active_on_host_and_subnet(context,
+ port_host,
+ port_id,
+ subnet):
vm_exists_on_subnet = True
break
binding.host != host):
binding.host = host
changes = True
- if "compute:" in port['device_owner']:
+ # Whenever a DVR serviceable port comes up on a
+ # node, it has to be communicated to the L3 Plugin
+ # and agent for creating the respective namespaces.
+ if (utils.is_dvr_serviced(port['device_owner'])):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if (utils.is_extension_supported(
return super(RpcCallbacks, self).get_dvr_mac_address_by_host(
rpc_context, host)
-    def get_compute_ports_on_host_by_subnet(self, rpc_context, **kwargs):
+    def get_ports_on_host_by_subnet(self, rpc_context, **kwargs):
+        """RPC entry point returning DVR serviced ports on a host/subnet."""
         host = kwargs.get('host')
         subnet = kwargs.get('subnet')
-        LOG.debug("DVR Agent requests list of VM ports on host %s", host)
-        return super(RpcCallbacks, self).get_compute_ports_on_host_by_subnet(
+        # The method now returns all DVR serviced ports (e.g. LBaaS VIPs
+        # too), so do not log it as a "VM ports" request.
+        LOG.debug("DVR Agent requests list of dvr serviced ports on "
+                  "host %s", host)
+        return super(RpcCallbacks, self).get_ports_on_host_by_subnet(
             rpc_context, host, subnet)
def get_subnet_for_dvr(self, rpc_context, **kwargs):
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import constants as n_const
+from neutron.common import utils as n_utils
from neutron.openstack.common import log as logging
from neutron.plugins.openvswitch.common import constants
subnet_info = ldm.get_subnet_info()
ip_subnet = subnet_info['cidr']
local_compute_ports = (
- self.plugin_rpc.get_compute_ports_on_host_by_subnet(
+ self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
- "get_compute_ports_on_host_by_subnet %s",
+ "get_ports_on_host_by_subnet %s",
local_compute_ports)
for prt in local_compute_ports:
vif = self.int_br.get_vif_port_by_id(prt['id'])
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
- def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips,
- device_owner, local_vlan):
+ def _bind_port_on_dvr_subnet(self, port, fixed_ips,
+ device_owner, local_vlan):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
device_owner,
local_vlan_id)
- if device_owner and device_owner.startswith('compute:'):
- self._bind_compute_port_on_dvr_subnet(port, fixed_ips,
- device_owner,
- local_vlan_id)
+ if device_owner and n_utils.is_dvr_serviced(device_owner):
+ self._bind_port_on_dvr_subnet(port, fixed_ips,
+ device_owner,
+ local_vlan_id)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips,
# release port state
self.local_ports.pop(port.vif_id, None)
- def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan):
+ def _unbind_port_on_dvr_subnet(self, port, local_vlan):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_id)
- if device_owner and device_owner.startswith('compute:'):
- self._unbind_compute_port_on_dvr_subnet(vif_port,
- local_vlan_id)
+ if device_owner and n_utils.is_dvr_serviced(device_owner):
+ self._unbind_port_on_dvr_subnet(vif_port, local_vlan_id)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
from neutron.common import constants
from neutron.common import exceptions as exc
+from neutron.common import utils
from neutron import context
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
mock.call(ctx, disassociate_floatingips.return_value)
])
+ def test_check_if_compute_port_serviced_by_dvr(self):
+ self.assertTrue(utils.is_dvr_serviced('compute:None'))
+
+ def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
+ self.assertTrue(utils.is_dvr_serviced(
+ constants.DEVICE_OWNER_LOADBALANCER))
+
+ def test_check_if_port_not_serviced_by_dvr(self):
+ self.assertFalse(utils.is_dvr_serviced(
+ constants.DEVICE_OWNER_ROUTER_INTF))
+
def test_disassociate_floatingips_do_notify_returns_nothing(self):
ctx = context.get_admin_context()
l3plugin = manager.NeutronManager.get_service_plugins().get(
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
- 'get_compute_ports_on_host_by_subnet',
- return_value=[]),
+ 'get_ports_on_host_by_subnet',
+ return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(delete_flows_int_fn.called)
- def test_port_bound_for_dvr_with_compute_ports(self, ofport=10):
+ def _test_port_bound_for_dvr(self, device_owner):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
- 'get_compute_ports_on_host_by_subnet',
- return_value=[]),
+ 'get_ports_on_host_by_subnet',
+ return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
self.agent.port_bound(self._compute_port, self._net_uuid,
'vxlan', None, None,
self._compute_fixed_ips,
- "compute:None", False)
+ device_owner, False)
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(add_flow_int_fn.called)
self.assertTrue(delete_flows_int_fn.called)
+ def test_port_bound_for_dvr_with_compute_ports(self):
+ self._test_port_bound_for_dvr(device_owner="compute:None")
+
+ def test_port_bound_for_dvr_with_lbaas_vip_ports(self):
+ self._test_port_bound_for_dvr(
+ device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
+
def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
- 'get_compute_ports_on_host_by_subnet',
- return_value=[]),
+ 'get_ports_on_host_by_subnet',
+ return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
- 'get_compute_ports_on_host_by_subnet',
- return_value=[]),
+ 'get_ports_on_host_by_subnet',
+ return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
self.assertTrue(delete_flows_int_fn.called)
self.assertTrue(delete_flows_tun_fn.called)
- def test_treat_devices_removed_for_dvr_with_compute_ports(self, ofport=10):
+ def _test_treat_devices_removed_for_dvr(self, device_owner):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
- 'get_compute_ports_on_host_by_subnet',
- return_value=[]),
+ 'get_ports_on_host_by_subnet',
+ return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
self._net_uuid, 'vxlan',
None, None,
self._compute_fixed_ips,
- "compute:None", False)
+ device_owner, False)
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(add_flow_int_fn.called)
self.assertTrue(delete_flows_int_fn.called)
self.agent.treat_devices_removed([self._compute_port.vif_id])
self.assertTrue(delete_flows_int_fn.called)
+ def test_treat_devices_removed_for_dvr_with_compute_ports(self):
+ self._test_treat_devices_removed_for_dvr(device_owner="compute:None")
+
+ def test_treat_devices_removed_for_dvr_with_lbaas_vip_ports(self):
+ self._test_treat_devices_removed_for_dvr(
+ device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
+
def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
- 'get_compute_ports_on_host_by_subnet',
- return_value=[]),
+ 'get_ports_on_host_by_subnet',
+ return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
self.assertEqual(sub_ids.pop(),
dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
- def test_check_vm_exists_on_subnet(self):
+ def test_check_ports_active_on_host_and_subnet(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'.L3AgentNotifyAPI')):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
- result = self.dut.check_vm_exists_on_subnet(
+ result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost', 'dvr_port1',
sub_ids)
self.assertFalse(result)
+    def test_check_dvr_serviced_port_exists_on_subnet(self):
+        """An ACTIVE serviceable port on the host makes the check True."""
+        # Fixture: an LBaaS VIP port bound to 'thisHost' on the subnet.
+        vip_port = {
+            'id': 'lbaas-vip-port1',
+            'device_id': 'vip-pool-id',
+            'status': 'ACTIVE',
+            'binding:host_id': 'thisHost',
+            'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
+            'fixed_ips': [
+                {
+                    'subnet_id': 'my-subnet-id',
+                    'ip_address': '10.10.10.1'
+                }
+            ]
+        }
+
+        # NOTE(review): is_dvr_serviced is mocked to True, so the
+        # DEVICE_OWNER_LOADBALANCER value above is never actually
+        # classified by the real helper here -- this test only verifies
+        # the checker's flow, not the owner classification itself.
+        with contextlib.nested(
+            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
+                       '.get_ports', return_value=[vip_port]),
+            mock.patch('neutron.common.utils.is_dvr_serviced',
+                       return_value=True)) as (get_ports_fn, dvr_serv_fn):
+            result = self.dut.check_ports_active_on_host_and_subnet(
+                self.adminContext,
+                'thisHost',
+                'dvr1-intf-id',
+                'my-subnet-id')
+            self.assertTrue(result)
+            self.assertEqual(dvr_serv_fn.call_count, 1)
+
def test_schedule_snat_router_with_snat_candidates(self):
agent = agents_db.Agent()
agent.admin_state_up = True