review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
L2 Agent-side additions to support DVR
author     Vivekanandan Narasimhan <vivekanandan.narasimhan@hp.com>
           Mon, 14 Apr 2014 21:55:25 +0000 (14:55 -0700)
committer  Vivekanandan Narasimhan <vivekanandan.narasimhan@hp.com>
           Tue, 22 Jul 2014 07:13:59 +0000 (00:13 -0700)
This patch introduces changes to the L2 agent, whereby the L2 agent
relies on a DVR component that takes care of the port wiring and the
management of tunnels in the face of topology changes due to the life
cycles of VMs as well as the life cycles of distributed virtual
routers.

Support for DVR needs to be explicitly enabled. Default behavior
remains unchanged.
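
The knob is the boolean enable_distributed_routing option in the [AGENT]
group of the OVS agent configuration; create_agent_config_map() below picks
it up and the agent reports it back in its agent_state. A minimal,
self-contained sketch of the flag handling (the option registration here is
only for illustration; in the tree it is assumed to be provided by the OVS
agent config module):

    from oslo.config import cfg

    # Illustrative stand-in for the real option registration.
    cfg.CONF.register_opts(
        [cfg.BoolOpt('enable_distributed_routing', default=False,
                     help='Make this L2 agent run in DVR mode')],
        'AGENT')

    # What deployers would set in the [AGENT] section, and what the new
    # unit test exercises via set_override(..., group='AGENT').
    cfg.CONF.set_override('enable_distributed_routing', True,
                          group='AGENT')
    assert cfg.CONF.AGENT.enable_distributed_routing is True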

Partially-implements: blueprint neutron-ovs-dvr

Change-Id: If75225898a6f0aeea8b0300b711ca7e01f6b4f9a

neutron/agent/linux/ovs_lib.py
neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py [new file with mode: 0644]
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py
neutron/plugins/openvswitch/common/constants.py
neutron/tests/unit/ofagent/test_ofa_neutron_agent.py
neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py
neutron/tests/unit/openvswitch/test_ovs_tunnel.py

index 9adb2d35dd9c1f3287014ec79eedea430c73265e..78967d1f3a3308b3cea22bfe7d493050b25ba2d8 100644 (file)
@@ -220,10 +220,22 @@ class OVSBridge(BaseOVS):
         return retval
 
     def defer_apply_on(self):
+        # TODO(vivek): when defer_apply_on is used, DVR
+        # flows are only getting partially configured when
+        # run concurrently with l2-pop ON.
+        # Will need to make the ovs_lib flow API context
+        # sensitive and then use it across this file, which
+        # will address the race issue here.
         LOG.debug(_('defer_apply_on'))
         self.defer_apply_flows = True
 
     def defer_apply_off(self):
+        # TODO(vivek): when defer_apply_off is used, DVR
+        # flows are only getting partially configured when
+        # run concurrently with l2-pop ON.
+        # Will need to make the ovs_lib flow API context
+        # sensitive and then use it across this file, which
+        # will address the race issue here.
         LOG.debug(_('defer_apply_off'))
         # Note(ethuleau): stash flows and disable deferred mode. Then apply
         # flows from the stashed reference to be sure to not purge flows that
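
Until that TODO is resolved, the agent avoids the race by simply not using
the deferred-apply mode when DVR is enabled, so DVR flows are programmed
immediately (see the fdb_add()/fdb_remove() hunks in ovs_neutron_agent.py
further down). A condensed, self-contained sketch of that guard; the stub
bridge and function names here are illustrative only:

    class FakeTunnelBridge(object):
        """Stand-in for OVSBridge; only the deferral hooks matter here."""

        def defer_apply_on(self):
            print("deferring flow application")

        def defer_apply_off(self):
            print("applying stashed flows")

    def program_fdb_entries(tun_br, enable_distributed_routing, entries):
        # With DVR on, skip deferral so DVR rules are never caught
        # half-programmed by a later deferred apply.
        if not enable_distributed_routing:
            tun_br.defer_apply_on()
        for entry in entries:
            print("programming flow for %s" % entry)
        if not enable_distributed_routing:
            tun_br.defer_apply_off()

    program_fdb_entries(FakeTunnelBridge(), True, ['fa:16:3e:00:00:01'])
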
diff --git a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py
new file mode 100644 (file)
index 0000000..21bc170
--- /dev/null
@@ -0,0 +1,719 @@
+# Copyright 2014, Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+# @author: Vivekanandan Narasimhan, Hewlett-Packard Inc
+
+
+from neutron.api.rpc.handlers import dvr_rpc
+from neutron.common import constants as n_const
+from neutron.openstack.common import log as logging
+from neutron.plugins.openvswitch.common import constants
+
+
+LOG = logging.getLogger(__name__)
+
+
+# A class to represent a DVR-hosted subnet including vif_ports resident on
+# that subnet
+class LocalDVRSubnetMapping:
+    def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
+        # set of compute ports on this dvr subnet
+        self.compute_ports = {}
+        self.subnet = subnet
+        self.csnat_ofport = csnat_ofport
+        self.dvr_owned = False
+
+    def __str__(self):
+        return ("subnet = %s compute_ports = %s csnat_port = %s"
+                " is_dvr_owned = %s" %
+                (self.subnet, self.get_compute_ofports(),
+                 self.get_csnat_ofport(), self.is_dvr_owned()))
+
+    def get_subnet_info(self):
+        return self.subnet
+
+    def set_dvr_owned(self, owned):
+        self.dvr_owned = owned
+
+    def is_dvr_owned(self):
+        return self.dvr_owned
+
+    def add_compute_ofport(self, vif_id, ofport):
+        self.compute_ports[vif_id] = ofport
+
+    def remove_compute_ofport(self, vif_id):
+        self.compute_ports.pop(vif_id, 0)
+
+    def remove_all_compute_ofports(self):
+        self.compute_ports.clear()
+
+    def get_compute_ofports(self):
+        return self.compute_ports
+
+    def set_csnat_ofport(self, ofport):
+        self.csnat_ofport = ofport
+
+    def get_csnat_ofport(self):
+        return self.csnat_ofport
+
+
+class OVSPort:
+    def __init__(self, id, ofport, mac, device_owner):
+        self.id = id
+        self.mac = mac
+        self.ofport = ofport
+        self.subnets = set()
+        self.device_owner = device_owner
+
+    def __str__(self):
+        return ("OVSPort: id = %s, ofport = %s, mac = %s,"
+                "device_owner = %s, subnets = %s" %
+                (self.id, self.ofport, self.mac,
+                 self.device_owner, self.subnets))
+
+    def add_subnet(self, subnet_id):
+        self.subnets.add(subnet_id)
+
+    def remove_subnet(self, subnet_id):
+        self.subnets.remove(subnet_id)
+
+    def remove_all_subnets(self):
+        self.subnets.clear()
+
+    def get_subnets(self):
+        return self.subnets
+
+    def get_device_owner(self):
+        return self.device_owner
+
+    def get_mac(self):
+        return self.mac
+
+    def get_ofport(self):
+        return self.ofport
+
+
+class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
+    '''Implements OVS-based DVR (Distributed Virtual Router) for
+    overlay networks.
+    '''
+    # history
+    #   1.0 Initial version
+
+    def __init__(self, context, plugin_rpc, integ_br, tun_br,
+                 patch_int_ofport=constants.OFPORT_INVALID,
+                 patch_tun_ofport=constants.OFPORT_INVALID,
+                 host=None, enable_tunneling=False,
+                 enable_distributed_routing=False):
+        self.context = context
+        self.plugin_rpc = plugin_rpc
+        self.int_br = integ_br
+        self.tun_br = tun_br
+        self.patch_int_ofport = patch_int_ofport
+        self.patch_tun_ofport = patch_tun_ofport
+        self.host = host
+        self.enable_tunneling = enable_tunneling
+        self.enable_distributed_routing = enable_distributed_routing
+
+    def reset_ovs_parameters(self, integ_br, tun_br,
+                             patch_int_ofport, patch_tun_ofport):
+        '''Reset the openvswitch parameters'''
+        if not (self.enable_tunneling and self.enable_distributed_routing):
+            return
+        self.int_br = integ_br
+        self.tun_br = tun_br
+        self.patch_int_ofport = patch_int_ofport
+        self.patch_tun_ofport = patch_tun_ofport
+
+    def setup_dvr_flows_on_integ_tun_br(self):
+        '''Setup up initial dvr flows into br-int and br-tun'''
+        if not (self.enable_tunneling and self.enable_distributed_routing):
+            return
+        LOG.debug("L2 Agent operating in DVR Mode")
+        self.dvr_mac_address = None
+        self.local_dvr_map = {}
+        self.local_csnat_map = {}
+        self.local_ports = {}
+        self.registered_dvr_macs = set()
+        # get the local DVR MAC Address
+        try:
+            details = self.plugin_rpc.get_dvr_mac_address_by_host(
+                self.context, self.host)
+            LOG.debug("L2 Agent DVR: Received response for "
+                      "get_dvr_mac_address_by_host() from "
+                      "plugin: %r", details)
+            self.dvr_mac_address = details['mac_address']
+        except Exception:
+            LOG.error(_("DVR: Failed to obtain local DVR Mac address"))
+            self.enable_distributed_routing = False
+            # switch all traffic using L2 learning
+            self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
+                                 priority=1, actions="normal")
+            return
+
+        # Remove existing flows in integration bridge
+        self.int_br.remove_all_flows()
+
+        # Add a canary flow to int_br to track OVS restarts
+        self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0,
+                             actions="drop")
+
+        # Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
+        self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                             priority=1,
+                             actions="drop")
+
+        # Insert 'normal' action as the default for Table LOCAL_SWITCHING
+        self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
+                             priority=1,
+                             actions="normal")
+
+        dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
+        LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
+        for mac in dvr_macs:
+            if mac['mac_address'] == self.dvr_mac_address:
+                continue
+            # Table 0 (default) will now sort DVR traffic from other
+            # traffic depending on in_port
+            self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
+                                 priority=2,
+                                 in_port=self.patch_tun_ofport,
+                                 dl_src=mac['mac_address'],
+                                 actions="resubmit(,%s)" %
+                                 constants.DVR_TO_SRC_MAC)
+            # Table DVR_NOT_LEARN ensures that the unique dvr macs in
+            # the cloud are not learnt, as they may result in
+            # flow explosions
+            self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
+                                 priority=1,
+                                 dl_src=mac['mac_address'],
+                                 actions="output:%s" % self.patch_int_ofport)
+
+            self.registered_dvr_macs.add(mac['mac_address'])
+
+        self.tun_br.add_flow(priority=1,
+                             in_port=self.patch_int_ofport,
+                             actions="resubmit(,%s)" %
+                             constants.DVR_PROCESS)
+        # table-miss should be sent to learning table
+        self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
+                             priority=0,
+                             actions="resubmit(,%s)" %
+                             constants.LEARN_FROM_TUN)
+
+        self.tun_br.add_flow(table=constants.DVR_PROCESS,
+                             priority=0,
+                             actions="resubmit(,%s)" %
+                             constants.PATCH_LV_TO_TUN)
+
+    def dvr_mac_address_update(self, dvr_macs):
+        if not (self.enable_tunneling and self.enable_distributed_routing):
+            return
+
+        LOG.debug("DVR Mac address update with host-mac: %s", dvr_macs)
+
+        if not self.dvr_mac_address:
+            LOG.debug("Self mac unknown, ignoring this "
+                      "dvr_mac_address_update() ")
+            return
+
+        dvr_host_macs = set()
+        for entry in dvr_macs:
+            if entry['mac_address'] == self.dvr_mac_address:
+                continue
+            dvr_host_macs.add(entry['mac_address'])
+
+        if dvr_host_macs == self.registered_dvr_macs:
+            LOG.debug("DVR Mac address already up to date")
+            return
+
+        dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
+        dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
+
+        for oldmac in dvr_macs_removed:
+            self.int_br.delete_flows(table=constants.LOCAL_SWITCHING,
+                                     in_port=self.patch_tun_ofport,
+                                     dl_src=oldmac)
+            self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN,
+                                     dl_src=oldmac)
+            LOG.debug("Removed DVR MAC flow for %s", oldmac)
+            self.registered_dvr_macs.remove(oldmac)
+
+        for newmac in dvr_macs_added:
+            self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
+                                 priority=2,
+                                 in_port=self.patch_tun_ofport,
+                                 dl_src=newmac,
+                                 actions="resubmit(,%s)" %
+                                 constants.DVR_TO_SRC_MAC)
+            self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
+                                 priority=1,
+                                 dl_src=newmac,
+                                 actions="output:%s" % self.patch_int_ofport)
+            LOG.debug("Added DVR MAC flow for %s", newmac)
+            self.registered_dvr_macs.add(newmac)
+
+    def is_dvr_router_interface(self, device_owner):
+        return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
+
+    def process_tunneled_network(self, network_type, lvid, segmentation_id):
+        if not (self.enable_tunneling and self.enable_distributed_routing):
+            return
+        self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
+                             priority=1,
+                             tun_id=segmentation_id,
+                             actions="mod_vlan_vid:%s,"
+                             "resubmit(,%s)" %
+                             (lvid, constants.DVR_NOT_LEARN))
+
+    def _bind_distributed_router_interface_port(self, port, fixed_ips,
+                                                device_owner, local_vlan):
+        # since router port must have only one fixed IP, directly
+        # use fixed_ips[0]
+        subnet_uuid = fixed_ips[0]['subnet_id']
+        csnat_ofport = constants.OFPORT_INVALID
+        ldm = None
+        if subnet_uuid in self.local_dvr_map:
+            ldm = self.local_dvr_map[subnet_uuid]
+            csnat_ofport = ldm.get_csnat_ofport()
+            if csnat_ofport == constants.OFPORT_INVALID:
+                LOG.error(_("DVR: Duplicate DVR router interface detected "
+                          "for subnet %s"), subnet_uuid)
+                return
+        else:
+            # set up LocalDVRSubnetMapping available for this subnet
+            subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
+                                                             subnet_uuid)
+            if not subnet_info:
+                LOG.error(_("DVR: Unable to retrieve subnet information"
+                          " for subnet_id %s"), subnet_uuid)
+                return
+            LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" %
+                      (subnet_uuid, subnet_info))
+            ldm = LocalDVRSubnetMapping(subnet_info)
+            self.local_dvr_map[subnet_uuid] = ldm
+
+        # DVR takes over
+        ldm.set_dvr_owned(True)
+
+        subnet_info = ldm.get_subnet_info()
+        ip_subnet = subnet_info['cidr']
+        local_compute_ports = (
+            self.plugin_rpc.get_compute_ports_on_host_by_subnet(
+                self.context, self.host, subnet_uuid))
+        LOG.debug("DVR: List of ports received from "
+                  "get_compute_ports_on_host_by_subnet %s",
+                  local_compute_ports)
+        for prt in local_compute_ports:
+            vif = self.int_br.get_vif_port_by_id(prt['id'])
+            if not vif:
+                continue
+            ldm.add_compute_ofport(vif.vif_id, vif.ofport)
+            if vif.vif_id in self.local_ports:
+                # check whether the compute port is already on
+                # a different dvr routed subnet; if so, add this
+                # subnet to that port
+                ovsport = self.local_ports[vif.vif_id]
+                ovsport.add_subnet(subnet_uuid)
+            else:
+                # this is the first time the compute port is seen on
+                # a dvr routed subnet; add this subnet to that port
+                ovsport = OVSPort(vif.vif_id, vif.ofport,
+                                  vif.vif_mac, prt['device_owner'])
+
+                ovsport.add_subnet(subnet_uuid)
+                self.local_ports[vif.vif_id] = ovsport
+
+            # create rule for just this vm port
+            self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                 priority=4,
+                                 dl_vlan=local_vlan,
+                                 dl_dst=ovsport.get_mac(),
+                                 actions="strip_vlan,mod_dl_src:%s,"
+                                 "output:%s" %
+                                 (subnet_info['gateway_mac'],
+                                  ovsport.get_ofport()))
+
+        # create rule to forward broadcast/multicast frames from dvr
+        # router interface to appropriate local tenant ports
+        ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
+        if csnat_ofport != constants.OFPORT_INVALID:
+            ofports = str(csnat_ofport) + ',' + ofports
+        if ofports:
+            self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                 priority=2,
+                                 proto='ip',
+                                 dl_vlan=local_vlan,
+                                 nw_dst=ip_subnet,
+                                 actions="strip_vlan,mod_dl_src:%s,"
+                                 "output:%s" %
+                                 (subnet_info['gateway_mac'], ofports))
+
+        self.tun_br.add_flow(table=constants.DVR_PROCESS,
+                             priority=3,
+                             dl_vlan=local_vlan,
+                             proto='arp',
+                             nw_dst=subnet_info['gateway_ip'],
+                             actions="drop")
+
+        self.tun_br.add_flow(table=constants.DVR_PROCESS,
+                             priority=2,
+                             dl_vlan=local_vlan,
+                             dl_dst=port.vif_mac,
+                             actions="drop")
+
+        self.tun_br.add_flow(table=constants.DVR_PROCESS,
+                             priority=1,
+                             dl_vlan=local_vlan,
+                             dl_src=port.vif_mac,
+                             actions="mod_dl_src:%s,resubmit(,%s)" %
+                             (self.dvr_mac_address,
+                              constants.PATCH_LV_TO_TUN))
+
+        # the dvr router interface is itself a port, so capture it and
+        # add this subnet to that port. A subnet appears only once as
+        # a router interface on any given router
+        ovsport = OVSPort(port.vif_id, port.ofport,
+                          port.vif_mac, device_owner)
+        ovsport.add_subnet(subnet_uuid)
+        self.local_ports[port.vif_id] = ovsport
+
+    def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips,
+                                         device_owner, local_vlan):
+        # Handle new compute port added use-case
+        subnet_uuid = None
+        for ips in fixed_ips:
+            if ips['subnet_id'] not in self.local_dvr_map:
+                continue
+            subnet_uuid = ips['subnet_id']
+            ldm = self.local_dvr_map[subnet_uuid]
+            if not ldm.is_dvr_owned():
+                # this subnet is not dvr owned yet (csnat only); let
+                # dvr do the plumbing for this vm when it takes over
+                continue
+
+            # This confirms that this compute port belongs
+            # to a dvr hosted subnet.
+            # Accommodate this VM Port into the existing rule in
+            # the integration bridge
+            LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
+            subnet_info = ldm.get_subnet_info()
+            ip_subnet = subnet_info['cidr']
+            csnat_ofport = ldm.get_csnat_ofport()
+            ldm.add_compute_ofport(port.vif_id, port.ofport)
+            if port.vif_id in self.local_ports:
+                # check whether the compute port is already on a
+                # different dvr routed subnet; if so, add this
+                # subnet to that port
+                ovsport = self.local_ports[port.vif_id]
+                ovsport.add_subnet(subnet_uuid)
+            else:
+                # this is the first time the compute port is seen
+                # on a dvr routed subnet; add this subnet to that port
+                ovsport = OVSPort(port.vif_id, port.ofport,
+                                  port.vif_mac, device_owner)
+
+                ovsport.add_subnet(subnet_uuid)
+                self.local_ports[port.vif_id] = ovsport
+            # create a rule for this vm port
+            self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                 priority=4,
+                                 dl_vlan=local_vlan,
+                                 dl_dst=ovsport.get_mac(),
+                                 actions="strip_vlan,mod_dl_src:%s,"
+                                 "output:%s" %
+                                 (subnet_info['gateway_mac'],
+                                  ovsport.get_ofport()))
+            ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
+
+            if csnat_ofport != constants.OFPORT_INVALID:
+                ofports = str(csnat_ofport) + ',' + ofports
+            self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                 priority=2,
+                                 proto='ip',
+                                 dl_vlan=local_vlan,
+                                 nw_dst=ip_subnet,
+                                 actions="strip_vlan,mod_dl_src:%s,"
+                                 " output:%s" %
+                                 (subnet_info['gateway_mac'], ofports))
+
+    def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips,
+                                                  device_owner, local_vlan):
+        if port.vif_id in self.local_ports:
+            # log an error if the CSNAT port is already on a different
+            # dvr routed subnet
+            ovsport = self.local_ports[port.vif_id]
+            subs = list(ovsport.get_subnets())
+            LOG.error(_("Centralized-SNAT port %(port)s already seen "
+                        "on a different subnet %(subnet)s"),
+                      {'port': port.vif_id, 'subnet': subs[0]})
+            return
+        # since centralized-SNAT (CSNAT) port must have only one fixed
+        # IP, directly use fixed_ips[0]
+        subnet_uuid = fixed_ips[0]['subnet_id']
+        ldm = None
+        subnet_info = None
+        if subnet_uuid not in self.local_dvr_map:
+            # no csnat ports seen on this subnet - create csnat state
+            # for this subnet
+            subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
+                                                             subnet_uuid)
+            ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
+            self.local_dvr_map[subnet_uuid] = ldm
+        else:
+            ldm = self.local_dvr_map[subnet_uuid]
+            subnet_info = ldm.get_subnet_info()
+            # Store csnat OF Port in the existing DVRSubnetMap
+            ldm.set_csnat_ofport(port.ofport)
+
+        # create ovsPort footprint for csnat port
+        ovsport = OVSPort(port.vif_id, port.ofport,
+                          port.vif_mac, device_owner)
+        ovsport.add_subnet(subnet_uuid)
+        self.local_ports[port.vif_id] = ovsport
+
+        self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                             priority=4,
+                             dl_vlan=local_vlan,
+                             dl_dst=ovsport.get_mac(),
+                             actions="strip_vlan,mod_dl_src:%s,"
+                             " output:%s" %
+                             (subnet_info['gateway_mac'],
+                              ovsport.get_ofport()))
+        ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
+        ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
+        ip_subnet = subnet_info['cidr']
+        self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                             priority=2,
+                             proto='ip',
+                             dl_vlan=local_vlan,
+                             nw_dst=ip_subnet,
+                             actions="strip_vlan,mod_dl_src:%s,"
+                             " output:%s" %
+                             (subnet_info['gateway_mac'], ofports))
+
+    def bind_port_to_dvr(self, port, network_type, fixed_ips,
+                         device_owner, local_vlan_id):
+        # Handle a port coming up in a DVR-enabled deployment
+        if not (self.enable_tunneling and self.enable_distributed_routing):
+            return
+
+        if network_type not in constants.TUNNEL_NETWORK_TYPES:
+            return
+
+        if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
+            self._bind_distributed_router_interface_port(port, fixed_ips,
+                                                         device_owner,
+                                                         local_vlan_id)
+
+        if device_owner and device_owner.startswith('compute:'):
+            self._bind_compute_port_on_dvr_subnet(port, fixed_ips,
+                                                  device_owner,
+                                                  local_vlan_id)
+
+        if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
+            self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips,
+                                                           device_owner,
+                                                           local_vlan_id)
+
+    def _unbind_distributed_router_interface_port(self, port, local_vlan):
+
+        ovsport = self.local_ports[port.vif_id]
+
+        # removal of distributed router interface
+        subnet_ids = ovsport.get_subnets()
+        subnet_set = set(subnet_ids)
+        # ensure we process all the subnets associated with this removed port
+        for sub_uuid in subnet_set:
+            if sub_uuid not in self.local_dvr_map:
+                continue
+
+            ldm = self.local_dvr_map[sub_uuid]
+            subnet_info = ldm.get_subnet_info()
+            ip_subnet = subnet_info['cidr']
+
+            # DVR no longer owns this subnet
+            ldm.set_dvr_owned(False)
+
+            # remove all vm rules for this dvr subnet and clear
+            # the compute_ports altogether
+            compute_ports = ldm.get_compute_ofports()
+            for vif_id in compute_ports:
+                ovsport = self.local_ports[vif_id]
+                self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
+                                         dl_vlan=local_vlan,
+                                         dl_dst=ovsport.get_mac())
+            ldm.remove_all_compute_ofports()
+
+            if ldm.get_csnat_ofport() != constants.OFPORT_INVALID:
+                # If there is a csnat port on this agent, preserve
+                # the local_dvr_map state
+                ofports = str(ldm.get_csnat_ofport())
+                self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                     priority=2,
+                                     proto='ip',
+                                     dl_vlan=local_vlan,
+                                     nw_dst=ip_subnet,
+                                     actions="strip_vlan,mod_dl_src:%s,"
+                                     " output:%s" %
+                                     (subnet_info['gateway_mac'], ofports))
+            else:
+                # no csnat port on this agent; remove the subnet flow
+                self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
+                                         proto='ip', dl_vlan=local_vlan,
+                                         nw_dst=ip_subnet)
+                # remove subnet from local_dvr_map as no dvr or csnat
+                # ports are available on this agent anymore
+                self.local_dvr_map.pop(sub_uuid, None)
+
+            self.tun_br.delete_flows(table=constants.DVR_PROCESS,
+                                     dl_vlan=local_vlan,
+                                     proto='arp',
+                                     nw_dst=subnet_info['gateway_ip'])
+            ovsport.remove_subnet(sub_uuid)
+
+        self.tun_br.delete_flows(table=constants.DVR_PROCESS,
+                                 dl_vlan=local_vlan,
+                                 dl_dst=port.vif_mac)
+
+        self.tun_br.delete_flows(table=constants.DVR_PROCESS,
+                                 dl_vlan=local_vlan,
+                                 dl_src=port.vif_mac)
+        # release port state
+        self.local_ports.pop(port.vif_id, None)
+
+    def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan):
+
+        ovsport = self.local_ports[port.vif_id]
+        # This confirms that the compute port being removed belonged
+        # to a dvr hosted subnet.
+        # Remove this VM port from the existing rules in
+        # the integration bridge
+        LOG.debug("DVR: Removing plumbing for compute port %s", port)
+        subnet_ids = ovsport.get_subnets()
+        # ensure we process all the subnets associated with this port
+        for sub_uuid in subnet_ids:
+            if sub_uuid not in self.local_dvr_map:
+                continue
+
+            ldm = self.local_dvr_map[sub_uuid]
+            subnet_info = ldm.get_subnet_info()
+            ldm.remove_compute_ofport(port.vif_id)
+            ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
+            ip_subnet = subnet_info['cidr']
+
+            # first remove this vm port rule
+            self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
+                                     dl_vlan=local_vlan,
+                                     dl_dst=ovsport.get_mac())
+            if ldm.get_csnat_ofport() != constants.OFPORT_INVALID:
+                # If there is a csnat port on this agent, preserve
+                # the local_dvr_map state
+                ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
+                self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                     priority=2,
+                                     proto='ip',
+                                     dl_vlan=local_vlan,
+                                     nw_dst=ip_subnet,
+                                     actions="strip_vlan,mod_dl_src:%s,"
+                                     " output:%s" %
+                                     (subnet_info['gateway_mac'], ofports))
+            else:
+                if ofports:
+                    self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                         priority=2,
+                                         proto='ip',
+                                         dl_vlan=local_vlan,
+                                         nw_dst=ip_subnet,
+                                         actions="strip_vlan,mod_dl_src:%s,"
+                                         " output:%s" %
+                                         (subnet_info['gateway_mac'],
+                                          ofports))
+                else:
+                    # remove the flow altogether, as no ports (neither
+                    # csnat nor compute) are available on this subnet
+                    # on this agent
+                    self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
+                                             proto='ip',
+                                             dl_vlan=local_vlan,
+                                             nw_dst=ip_subnet)
+        # release port state
+        self.local_ports.pop(port.vif_id, None)
+
+    def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan):
+
+        ovsport = self.local_ports[port.vif_id]
+        # This confirms that the csnat port being removed belonged
+        # to a dvr hosted subnet.
+        # Remove this port from the existing rules in
+        # the integration bridge
+        LOG.debug("DVR: Removing plumbing for csnat port %s", port)
+        sub_uuid = list(ovsport.get_subnets())[0]
+        # ensure we process the subnet associated with this port
+        if sub_uuid not in self.local_dvr_map:
+            return
+        ldm = self.local_dvr_map[sub_uuid]
+        subnet_info = ldm.get_subnet_info()
+        ip_subnet = subnet_info['cidr']
+        ldm.set_csnat_ofport(constants.OFPORT_INVALID)
+        # then remove csnat port rule
+        self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
+                                 dl_vlan=local_vlan,
+                                 dl_dst=ovsport.get_mac())
+
+        ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
+        if ofports:
+            self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
+                                 priority=2,
+                                 proto='ip',
+                                 dl_vlan=local_vlan,
+                                 nw_dst=ip_subnet,
+                                 actions="strip_vlan,mod_dl_src:%s,"
+                                 " output:%s" %
+                                 (subnet_info['gateway_mac'], ofports))
+        else:
+            self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
+                                     proto='ip',
+                                     dl_vlan=local_vlan,
+                                     nw_dst=ip_subnet)
+        if not ldm.is_dvr_owned():
+            # if not owned by DVR (only used for csnat), remove this
+            # subnet state altogether
+            self.local_dvr_map.pop(sub_uuid, None)
+
+        # release port state
+        self.local_ports.pop(port.vif_id, None)
+
+    def unbind_port_from_dvr(self, vif_port, local_vlan_id):
+        if not (self.enable_tunneling and self.enable_distributed_routing):
+            return
+        # Handle port removed use-case
+        if vif_port and vif_port.vif_id not in self.local_ports:
+            LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
+            return
+
+        ovsport = self.local_ports[vif_port.vif_id]
+        device_owner = ovsport.get_device_owner()
+
+        if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
+            self._unbind_distributed_router_interface_port(vif_port,
+                                                           local_vlan_id)
+
+        if device_owner and device_owner.startswith('compute:'):
+            self._unbind_compute_port_on_dvr_subnet(vif_port,
+                                                    local_vlan_id)
+
+        if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
+            self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
+                                                             local_vlan_id)
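
For review, the new class can be exercised in isolation against mock bridges
and a stubbed plugin-side RPC to see which flows
setup_dvr_flows_on_integ_tun_br() would program, without touching a real OVS
instance. A sketch that assumes neutron with this patch series (including the
dvr_rpc handlers it imports) and the mock library are importable; the MAC
addresses and host name are made up:

    import mock

    from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent

    plugin_rpc = mock.Mock()
    plugin_rpc.get_dvr_mac_address_by_host.return_value = {
        'mac_address': 'fa:16:3f:00:00:01'}
    plugin_rpc.get_dvr_mac_address_list.return_value = [
        {'mac_address': 'fa:16:3f:00:00:01'},
        {'mac_address': 'fa:16:3f:00:00:02'}]

    int_br = mock.Mock()
    tun_br = mock.Mock()
    dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
        context=mock.Mock(),
        plugin_rpc=plugin_rpc,
        integ_br=int_br,
        tun_br=tun_br,
        patch_int_ofport=1,
        patch_tun_ofport=2,
        host='compute-1',
        enable_tunneling=True,
        enable_distributed_routing=True)

    dvr_agent.setup_dvr_flows_on_integ_tun_br()

    # Inspect the flows that would have been installed on each bridge.
    for call in int_br.add_flow.call_args_list:
        print('br-int: %s' % (call,))
    for call in tun_br.add_flow.call_args_list:
        print('br-tun: %s' % (call,))

With the stubs above, the remote MAC fa:16:3f:00:00:02 should show up in a
LOCAL_SWITCHING rule on br-int and a DVR_NOT_LEARN rule on br-tun, mirroring
the add_flow calls in the method above.
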
index 35fb9fb22101f430a090d09d02e03a34d2d371e8..4f8c2b60be3137807769336ed7bf58c7b99480c2 100644 (file)
@@ -23,6 +23,7 @@ import eventlet
 eventlet.monkey_patch()
 
 import netaddr
+from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent
 from oslo.config import cfg
 from six import moves
 
@@ -33,6 +34,7 @@ from neutron.agent.linux import polling
 from neutron.agent.linux import utils
 from neutron.agent import rpc as agent_rpc
 from neutron.agent import securitygroups_rpc as sg_rpc
+from neutron.api.rpc.handlers import dvr_rpc
 from neutron.common import config as common_config
 from neutron.common import constants as q_const
 from neutron.common import exceptions
@@ -80,6 +82,7 @@ class LocalVLANMapping:
 
 
 class OVSPluginApi(agent_rpc.PluginApi,
+                   dvr_rpc.DVRServerRpcApiMixin,
                    sg_rpc.SecurityGroupServerRpcApiMixin):
     pass
 
@@ -94,7 +97,8 @@ class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin):
 
 class OVSNeutronAgent(n_rpc.RpcCallback,
                       sg_rpc.SecurityGroupAgentRpcCallbackMixin,
-                      l2population_rpc.L2populationRpcCallBackMixin):
+                      l2population_rpc.L2populationRpcCallBackMixin,
+                      dvr_rpc.DVRAgentRpcCallbackMixin):
     '''Implements OVS-based tunneling, VLANs and flat networks.
 
     Two local bridges are created: an integration bridge (defaults to
@@ -124,12 +128,14 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
     # history
     #   1.0 Initial version
     #   1.1 Support Security Group RPC
-    RPC_API_VERSION = '1.1'
+    #   1.2 Support DVR (Distributed Virtual Router) RPC
+    RPC_API_VERSION = '1.2'
 
     def __init__(self, integ_br, tun_br, local_ip,
                  bridge_mappings, root_helper,
                  polling_interval, tunnel_types=None,
                  veth_mtu=None, l2_population=False,
+                 enable_distributed_routing=False,
                  minimize_polling=False,
                  ovsdb_monitor_respawn_interval=(
                      constants.DEFAULT_OVSDBMON_RESPAWN),
@@ -171,6 +177,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
         self.arp_responder_enabled = (arp_responder and
                                       self._check_arp_responder_support() and
                                       self.l2_pop)
+        self.enable_distributed_routing = enable_distributed_routing
         self.agent_state = {
             'binary': 'neutron-openvswitch-agent',
             'host': cfg.CONF.host,
@@ -180,7 +187,9 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
                                'tunneling_ip': local_ip,
                                'l2_population': self.l2_pop,
                                'arp_responder_enabled':
-                               self.arp_responder_enabled},
+                               self.arp_responder_enabled,
+                               'enable_distributed_routing':
+                               self.enable_distributed_routing},
             'agent_type': q_const.AGENT_TYPE_OVS,
             'start_flag': True}
 
@@ -211,8 +220,26 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
         self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port
         self.dont_fragment = cfg.CONF.AGENT.dont_fragment
         self.tun_br = None
+        self.patch_int_ofport = constants.OFPORT_INVALID
+        self.patch_tun_ofport = constants.OFPORT_INVALID
         if self.enable_tunneling:
+            # The patch_int_ofport and patch_tun_ofport are updated
+            # here inside the call to setup_tunnel_br
             self.setup_tunnel_br(tun_br)
+
+        self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
+            self.context,
+            self.plugin_rpc,
+            self.int_br,
+            self.tun_br,
+            self.patch_int_ofport,
+            self.patch_tun_ofport,
+            cfg.CONF.host,
+            self.enable_tunneling,
+            self.enable_distributed_routing)
+
+        self.dvr_agent.setup_dvr_flows_on_integ_tun_br()
+
         # Collect additional bridges to monitor
         self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
 
@@ -263,7 +290,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
         consumers = [[topics.PORT, topics.UPDATE],
                      [topics.NETWORK, topics.DELETE],
                      [constants.TUNNEL, topics.UPDATE],
-                     [topics.SECURITY_GROUP, topics.UPDATE]]
+                     [topics.SECURITY_GROUP, topics.UPDATE],
+                     [topics.DVR, topics.UPDATE]]
         if self.l2_pop:
             consumers.append([topics.L2POPULATION,
                               topics.UPDATE, cfg.CONF.host])
@@ -332,7 +360,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
             agent_ports = values.get('ports')
             agent_ports.pop(self.local_ip, None)
             if len(agent_ports):
-                self.tun_br.defer_apply_on()
+                if not self.enable_distributed_routing:
+                    self.tun_br.defer_apply_on()
                 for agent_ip, ports in agent_ports.items():
                     # Ensure we have a tunnel port with this remote agent
                     ofport = self.tun_br_ofports[
@@ -347,8 +376,9 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
                         if ofport == 0:
                             continue
                     for port in ports:
-                        self._add_fdb_flow(port, lvm, ofport)
-                self.tun_br.defer_apply_off()
+                        self._add_fdb_flow(port, agent_ip, lvm, ofport)
+                if not self.enable_distributed_routing:
+                    self.tun_br.defer_apply_off()
 
     def fdb_remove(self, context, fdb_entries):
         LOG.debug(_("fdb_remove received"))
@@ -360,17 +390,19 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
             agent_ports = values.get('ports')
             agent_ports.pop(self.local_ip, None)
             if len(agent_ports):
-                self.tun_br.defer_apply_on()
+                if not self.enable_distributed_routing:
+                    self.tun_br.defer_apply_on()
                 for agent_ip, ports in agent_ports.items():
                     ofport = self.tun_br_ofports[
                         lvm.network_type].get(agent_ip)
                     if not ofport:
                         continue
                     for port in ports:
-                        self._del_fdb_flow(port, lvm, ofport)
-                self.tun_br.defer_apply_off()
+                        self._del_fdb_flow(port, agent_ip, lvm, ofport)
+                if not self.enable_distributed_routing:
+                    self.tun_br.defer_apply_off()
 
-    def _add_fdb_flow(self, port_info, lvm, ofport):
+    def _add_fdb_flow(self, port_info, agent_ip, lvm, ofport):
         if port_info == q_const.FLOODING_ENTRY:
             lvm.tun_ofports.add(ofport)
             ofports = ','.join(lvm.tun_ofports)
@@ -381,14 +413,16 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
         else:
             self._set_arp_responder('add', lvm.vlan, port_info[0],
                                     port_info[1])
-            self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
-                                 priority=2,
-                                 dl_vlan=lvm.vlan,
-                                 dl_dst=port_info[0],
-                                 actions="strip_vlan,set_tunnel:%s,output:%s" %
-                                 (lvm.segmentation_id, ofport))
+            if not self.dvr_agent.is_dvr_router_interface(port_info[1]):
+                self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
+                                     priority=2,
+                                     dl_vlan=lvm.vlan,
+                                     dl_dst=port_info[0],
+                                     actions="strip_vlan,set_tunnel:%s,"
+                                     "output:%s" %
+                                     (lvm.segmentation_id, ofport))
 
-    def _del_fdb_flow(self, port_info, lvm, ofport):
+    def _del_fdb_flow(self, port_info, agent_ip, lvm, ofport):
         if port_info == q_const.FLOODING_ENTRY:
             lvm.tun_ofports.remove(ofport)
             if len(lvm.tun_ofports) > 0:
@@ -545,11 +579,18 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
                                          (segmentation_id, ofports))
                 # inbound from tunnels: set lvid in the right table
                 # and resubmit to Table LEARN_FROM_TUN for mac learning
-                self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
-                                     priority=1,
-                                     tun_id=segmentation_id,
-                                     actions="mod_vlan_vid:%s,resubmit(,%s)" %
-                                     (lvid, constants.LEARN_FROM_TUN))
+                if self.enable_distributed_routing:
+                    self.dvr_agent.process_tunneled_network(
+                        network_type, lvid, segmentation_id)
+                else:
+                    self.tun_br.add_flow(
+                        table=constants.TUN_TABLE[network_type],
+                        priority=1,
+                        tun_id=segmentation_id,
+                        actions="mod_vlan_vid:%s,"
+                        "resubmit(,%s)" %
+                        (lvid, constants.LEARN_FROM_TUN))
+
             else:
                 LOG.error(_("Cannot provision %(network_type)s network for "
                           "net-id=%(net_uuid)s - tunneling disabled"),
@@ -664,7 +705,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
         self.available_local_vlans.add(lvm.vlan)
 
     def port_bound(self, port, net_uuid,
-                   network_type, physical_network, segmentation_id,
+                   network_type, physical_network,
+                   segmentation_id, fixed_ips, device_owner,
                    ovs_restarted):
         '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
         to vm.
@@ -674,6 +716,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
         :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
         :param physical_network: the physical network for 'vlan' or 'flat'
         :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
+        :param fixed_ips: the ip addresses assigned to this port
+        :param device_owner: the string indicative of owner of this port
         :param ovs_restarted: indicates if this is called for an OVS restart.
         '''
         if net_uuid not in self.local_vlan_map or ovs_restarted:
@@ -681,6 +725,11 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
                                       physical_network, segmentation_id)
         lvm = self.local_vlan_map[net_uuid]
         lvm.vif_ports[port.vif_id] = port
+
+        self.dvr_agent.bind_port_to_dvr(port, network_type, fixed_ips,
+                                        device_owner,
+                                        local_vlan_id=lvm.vlan)
+
         # Do not bind a port if it's already bound
         cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag")
         if cur_tag != str(lvm.vlan):
@@ -702,11 +751,16 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
             net_uuid = self.get_net_uuid(vif_id)
 
         if not self.local_vlan_map.get(net_uuid):
-            LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'),
+            LOG.info(_('port_unbound(): net_uuid %s not in local_vlan_map'),
                      net_uuid)
             return
 
         lvm = self.local_vlan_map[net_uuid]
+
+        if vif_id in lvm.vif_ports:
+            vif_port = lvm.vif_ports[vif_id]
+            self.dvr_agent.unbind_port_from_dvr(vif_port,
+                                                local_vlan_id=lvm.vlan)
         lvm.vif_ports.pop(vif_id, None)
 
         if not lvm.vif_ports:
@@ -1039,7 +1093,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
 
     def treat_vif_port(self, vif_port, port_id, network_id, network_type,
                        physical_network, segmentation_id, admin_state_up,
-                       ovs_restarted):
+                       fixed_ips, device_owner, ovs_restarted):
         # When this function is called for a port, the port should have
         # an OVS ofport configured, as only these ports were considered
         # for being treated. If that does not happen, it is a potential
@@ -1051,7 +1105,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
             if admin_state_up:
                 self.port_bound(vif_port, network_id, network_type,
                                 physical_network, segmentation_id,
-                                ovs_restarted)
+                                fixed_ips, device_owner, ovs_restarted)
             else:
                 self.port_dead(vif_port)
         else:
@@ -1117,7 +1171,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
             devices_details_list = self.plugin_rpc.get_devices_details_list(
                 self.context,
                 devices,
-                self.agent_id)
+                self.agent_id,
+                cfg.CONF.host)
         except Exception as e:
             raise DeviceListRetrievalError(devices=devices, error=e)
         for details in devices_details_list:
@@ -1140,6 +1195,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
                                     details['physical_network'],
                                     details['segmentation_id'],
                                     details['admin_state_up'],
+                                    details['fixed_ips'],
+                                    details['device_owner'],
                                     ovs_restarted)
                 # update plugin about port status
                 # FIXME(salv-orlando): Failures while updating device status
@@ -1166,7 +1223,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
             devices_details_list = self.plugin_rpc.get_devices_details_list(
                 self.context,
                 devices,
-                self.agent_id)
+                self.agent_id,
+                cfg.CONF.host)
         except Exception as e:
             raise DeviceListRetrievalError(devices=devices, error=e)
 
@@ -1391,6 +1449,11 @@ class OVSNeutronAgent(n_rpc.RpcCallback,
                 if self.enable_tunneling:
                     self.setup_tunnel_br()
                     tunnel_sync = True
+                self.dvr_agent.reset_ovs_parameters(self.int_br,
+                                                    self.tun_br,
+                                                    self.patch_int_ofport,
+                                                    self.patch_tun_ofport)
+                self.dvr_agent.setup_dvr_flows_on_integ_tun_br()
             # Notify the plugin of tunnel IP
             if self.enable_tunneling and tunnel_sync:
                 LOG.info(_("Agent tunnel out of sync with plugin!"))
@@ -1522,6 +1585,7 @@ def create_agent_config_map(config):
         minimize_polling=config.AGENT.minimize_polling,
         tunnel_types=config.AGENT.tunnel_types,
         veth_mtu=config.AGENT.veth_mtu,
+        enable_distributed_routing=config.AGENT.enable_distributed_routing,
         l2_population=config.AGENT.l2_population,
         arp_responder=config.AGENT.arp_responder,
         use_veth_interconnection=config.OVS.use_veth_interconnection,
index 9cf6637dacc4a0dd0592161b1bf4c4ff7ecc2d4e..b74242b0b06ae8f7c6f0f8a5cff63405275010e4 100644 (file)
@@ -37,10 +37,16 @@ NONEXISTENT_PEER = 'nonexistent-peer'
 # The different types of tunnels
 TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN]
 
+# Various tables for DVR use of integration bridge flows
+LOCAL_SWITCHING = 0
+DVR_TO_SRC_MAC = 1
+
 # Various tables for tunneling flows
-PATCH_LV_TO_TUN = 1
-GRE_TUN_TO_LV = 2
-VXLAN_TUN_TO_LV = 3
+DVR_PROCESS = 1
+PATCH_LV_TO_TUN = 2
+GRE_TUN_TO_LV = 3
+VXLAN_TUN_TO_LV = 4
+DVR_NOT_LEARN = 9
 LEARN_FROM_TUN = 10
 UCAST_TO_TUN = 20
 ARP_RESPONDER = 21
@@ -59,3 +65,6 @@ DEFAULT_OVSDBMON_RESPAWN = 30
 
 # Special return value for an invalid OVS ofport
 INVALID_OFPORT = '-1'
+
+# Represent invalid OF Port
+OFPORT_INVALID = -1
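
The renumbering above makes room for DVR_PROCESS at table 1 of br-tun and
slots DVR_NOT_LEARN in just before LEARN_FROM_TUN, while LOCAL_SWITCHING and
DVR_TO_SRC_MAC are br-int tables. A quick sanity check of the resulting
values, assuming the patched module is importable:

    from neutron.plugins.openvswitch.common import constants

    # br-int tables used by DVR
    assert constants.LOCAL_SWITCHING == 0
    assert constants.DVR_TO_SRC_MAC == 1

    # br-tun pipeline after the renumbering
    assert constants.DVR_PROCESS == 1
    assert constants.PATCH_LV_TO_TUN == 2
    assert constants.GRE_TUN_TO_LV == 3
    assert constants.VXLAN_TUN_TO_LV == 4
    assert constants.DVR_NOT_LEARN == 9
    assert constants.LEARN_FROM_TUN == 10

    # sentinel used throughout the new DVR agent code
    assert constants.OFPORT_INVALID == -1
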
index 5e0daf289e56465ec111f9a9d1013ad32a05346e..406480d801d08f9849df058e6c3953be5f0b2902 100644 (file)
@@ -790,11 +790,12 @@ class TestOFANeutronAgent(OFAAgentTestCase):
                         ofpp.OFPActionSetField(vlan_vid=1 |
                                                ofp.OFPVID_PRESENT),
                     ]),
-                ofpp.OFPInstructionGotoTable(table_id=10),
+                ofpp.OFPInstructionGotoTable(
+                    table_id=constants.LEARN_FROM_TUN),
             ],
             match=ofpp.OFPMatch(tunnel_id=3),
             priority=1,
-            table_id=2)
+            table_id=constants.TUN_TABLE['gre'])
         sendmsg.assert_has_calls([mock.call(expected_msg)])
 
     def test__provision_local_vlan_outbound(self):
index 498046c8df9c065048627eda035692dd4e3df5b4..771a09ddff4d467e01a1f01fcd97a6e83afd0472 100644 (file)
@@ -85,6 +85,14 @@ class CreateAgentConfigMap(base.BaseTestCase):
         self.assertEqual(cfgmap['tunnel_types'],
                          [p_const.TYPE_GRE, p_const.TYPE_VXLAN])
 
+    def test_create_agent_config_map_enable_distributed_routing(self):
+        self.addCleanup(cfg.CONF.reset)
+        # Verify enable_distributed_routing is passed through to the map
+        cfg.CONF.set_override('enable_distributed_routing', True,
+                              group='AGENT')
+        cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
+        self.assertEqual(cfgmap['enable_distributed_routing'], True)
+
 
 class TestOvsNeutronAgent(base.BaseTestCase):
 
@@ -142,6 +150,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
         port = mock.Mock()
         port.ofport = ofport
         net_uuid = 'my-net-uuid'
+        fixed_ips = [{'subnet_id': 'my-subnet-uuid',
+                      'ip_address': '1.1.1.1'}]
         if old_local_vlan is not None:
             self.agent.local_vlan_map[net_uuid] = (
                 ovs_neutron_agent.LocalVLANMapping(
@@ -153,7 +163,8 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                        'db_get_val', return_value=str(old_local_vlan)),
             mock.patch.object(self.agent.int_br, 'delete_flows')
         ) as (set_ovs_db_func, get_ovs_db_func, delete_flows_func):
-            self.agent.port_bound(port, net_uuid, 'local', None, None, False)
+            self.agent.port_bound(port, net_uuid, 'local', None, None,
+                                  fixed_ips, "compute:None", False)
         get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
         if new_local_vlan != old_local_vlan:
             set_ovs_db_func.assert_called_once_with(
@@ -166,6 +177,34 @@ class TestOvsNeutronAgent(base.BaseTestCase):
             self.assertFalse(set_ovs_db_func.called)
             self.assertFalse(delete_flows_func.called)
 
+    def _setup_for_dvr_test(self, ofport=10):
+        self._port = mock.Mock()
+        self._port.ofport = ofport
+        self._port.vif_id = "1234-5678-90"
+        self.agent.enable_distributed_routing = True
+        self.agent.enable_tunneling = True
+        self.agent.patch_tun_ofport = 1
+        self.agent.patch_int_ofport = 2
+        self.agent.dvr_agent.local_ports = {}
+        self.agent.local_vlan_map = {}
+        self.agent.dvr_agent.enable_distributed_routing = True
+        self.agent.dvr_agent.enable_tunneling = True
+        self.agent.dvr_agent.patch_tun_ofport = 1
+        self.agent.dvr_agent.patch_int_ofport = 2
+        self.agent.dvr_agent.tun_br = mock.Mock()
+        self.agent.dvr_agent.local_dvr_map = {}
+        self.agent.dvr_agent.registered_dvr_macs = set()
+        self.agent.dvr_agent.dvr_mac_address = 'aa:22:33:44:55:66'
+        self._net_uuid = 'my-net-uuid'
+        self._fixed_ips = [{'subnet_id': 'my-subnet-uuid',
+                            'ip_address': '1.1.1.1'}]
+        self._compute_port = mock.Mock()
+        self._compute_port.ofport = 20
+        self._compute_port.vif_id = "1234-5678-91"
+        self._old_local_vlan = None
+        self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid',
+                                    'ip_address': '1.1.1.3'}]
+
     def test_port_bound_deletes_flows_for_valid_ofport(self):
         self._mock_port_bound(ofport=1, new_local_vlan=1)
 
@@ -175,6 +214,287 @@ class TestOvsNeutronAgent(base.BaseTestCase):
     def test_port_bound_does_not_rewire_if_already_bound(self):
         self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1)
 
+    def test_port_bound_for_dvr_interface(self, ofport=10):
+        self._setup_for_dvr_test()
+        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                        'set_db_attribute',
+                        return_value=True):
+            with contextlib.nested(
+                mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                           'db_get_val',
+                           return_value=str(self._old_local_vlan)),
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
+                    return_value={'gateway_ip': '1.1.1.1',
+                                  'cidr': '1.1.1.0/24',
+                                  'gateway_mac': 'aa:bb:cc:11:22:33'}),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_compute_ports_on_host_by_subnet',
+                                  return_value=[]),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'get_vif_port_by_id',
+                                  return_value=self._port),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
+            ) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
+                  get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
+                  add_flow_tun_fn, delete_flows_tun_fn):
+                self.agent.port_bound(
+                    self._port, self._net_uuid, 'vxlan',
+                    None, None, self._fixed_ips,
+                    n_const.DEVICE_OWNER_DVR_INTERFACE,
+                    False)
+                self.assertTrue(add_flow_tun_fn.called)
+                self.assertTrue(delete_flows_int_fn.called)
+
+    def test_port_bound_for_dvr_with_compute_ports(self, ofport=10):
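+        """Bind a DVR interface port and then a VM port on the same subnet.
+
+        The test checks that flows are added on both the tunnel and
+        integration bridges and that integration-bridge flows are deleted.
+        """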
+        self._setup_for_dvr_test()
+        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                        'set_db_attribute',
+                        return_value=True):
+            with contextlib.nested(
+                mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                           'db_get_val',
+                           return_value=str(self._old_local_vlan)),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_subnet_for_dvr',
+                                  return_value={
+                                      'gateway_ip': '1.1.1.1',
+                                      'cidr': '1.1.1.0/24',
+                                      'gateway_mac': 'aa:bb:cc:11:22:33'}),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_compute_ports_on_host_by_subnet',
+                                  return_value=[]),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'get_vif_port_by_id',
+                                  return_value=self._port),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
+            ) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
+                  get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
+                  add_flow_tun_fn, delete_flows_tun_fn):
+                self.agent.port_bound(
+                    self._port, self._net_uuid, 'vxlan',
+                    None, None, self._fixed_ips,
+                    n_const.DEVICE_OWNER_DVR_INTERFACE,
+                    False)
+                self.agent.port_bound(self._compute_port, self._net_uuid,
+                                      'vxlan', None, None,
+                                      self._compute_fixed_ips,
+                                      "compute:None", False)
+                self.assertTrue(add_flow_tun_fn.called)
+                self.assertTrue(add_flow_int_fn.called)
+                self.assertTrue(delete_flows_int_fn.called)
+
+    def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10):
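+        """Bind a centralized SNAT (csnat) port on a DVR-served subnet.
+
+        The test checks that integration-bridge flows are both added and
+        deleted for the SNAT port.
+        """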
+        self._setup_for_dvr_test()
+        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                        'set_db_attribute',
+                        return_value=True):
+            with contextlib.nested(
+                mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                           'db_get_val',
+                           return_value=str(self._old_local_vlan)),
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
+                    return_value={'gateway_ip': '1.1.1.1',
+                                  'cidr': '1.1.1.0/24',
+                                  'gateway_mac': 'aa:bb:cc:11:22:33'}),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_compute_ports_on_host_by_subnet',
+                                  return_value=[]),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'get_vif_port_by_id',
+                                  return_value=self._port),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
+            ) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
+                  get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
+                  add_flow_tun_fn, delete_flows_tun_fn):
+                self.agent.port_bound(
+                    self._port, self._net_uuid, 'vxlan',
+                    None, None, self._fixed_ips,
+                    n_const.DEVICE_OWNER_ROUTER_SNAT,
+                    False)
+                self.assertTrue(add_flow_int_fn.called)
+                self.assertTrue(delete_flows_int_fn.called)
+
+    def test_treat_devices_removed_for_dvr_interface(self, ofport=10):
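+        """Remove a previously bound DVR interface port.
+
+        The first block binds the DVR interface port; the second block
+        checks that treat_devices_removed deletes flows on both the
+        integration and tunnel bridges.
+        """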
+        self._setup_for_dvr_test()
+        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                        'set_db_attribute',
+                        return_value=True):
+            with contextlib.nested(
+                mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                           'db_get_val',
+                           return_value=str(self._old_local_vlan)),
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
+                    return_value={'gateway_ip': '1.1.1.1',
+                                  'cidr': '1.1.1.0/24',
+                                  'gateway_mac': 'aa:bb:cc:11:22:33'}),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_compute_ports_on_host_by_subnet',
+                                  return_value=[]),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'get_vif_port_by_id',
+                                  return_value=self._port),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
+            ) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
+                  get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
+                  add_flow_tun_fn, delete_flows_tun_fn):
+                self.agent.port_bound(
+                    self._port, self._net_uuid, 'vxlan',
+                    None, None, self._fixed_ips,
+                    n_const.DEVICE_OWNER_DVR_INTERFACE,
+                    False)
+                self.assertTrue(add_flow_tun_fn.called)
+                self.assertTrue(delete_flows_int_fn.called)
+
+        with contextlib.nested(
+            mock.patch.object(self.agent, 'reclaim_local_vlan'),
+            mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
+                              return_value=None),
+            mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+            mock.patch.object(self.agent.dvr_agent.tun_br,
+                              'delete_flows')) as (reclaim_vlan_fn,
+                                                   update_dev_down_fn,
+                                                   delete_flows_int_fn,
+                                                   delete_flows_tun_fn):
+            self.agent.treat_devices_removed([self._port.vif_id])
+            self.assertTrue(delete_flows_int_fn.called)
+            self.assertTrue(delete_flows_tun_fn.called)
+
+    def test_treat_devices_removed_for_dvr_with_compute_ports(self, ofport=10):
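+        """Remove a VM port that was bound on a DVR-served subnet.
+
+        After binding a DVR interface port and a compute port, the test
+        checks that removing the compute port deletes integration-bridge
+        flows.
+        """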
+        self._setup_for_dvr_test()
+        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                        'set_db_attribute',
+                        return_value=True):
+            with contextlib.nested(
+                mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                           'db_get_val',
+                           return_value=str(self._old_local_vlan)),
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
+                    return_value={'gateway_ip': '1.1.1.1',
+                                  'cidr': '1.1.1.0/24',
+                                  'gateway_mac': 'aa:bb:cc:11:22:33'}),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_compute_ports_on_host_by_subnet',
+                                  return_value=[]),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'get_vif_port_by_id',
+                                  return_value=self._port),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
+            ) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
+                  get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
+                  add_flow_tun_fn, delete_flows_tun_fn):
+                self.agent.port_bound(
+                    self._port, self._net_uuid, 'vxlan',
+                    None, None, self._fixed_ips,
+                    n_const.DEVICE_OWNER_DVR_INTERFACE,
+                    False)
+                self.agent.port_bound(self._compute_port,
+                                      self._net_uuid, 'vxlan',
+                                      None, None,
+                                      self._compute_fixed_ips,
+                                      "compute:None", False)
+                self.assertTrue(add_flow_tun_fn.called)
+                self.assertTrue(add_flow_int_fn.called)
+                self.assertTrue(delete_flows_int_fn.called)
+
+        with contextlib.nested(
+            mock.patch.object(self.agent, 'reclaim_local_vlan'),
+            mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
+                              return_value=None),
+            mock.patch.object(self.agent.dvr_agent.int_br,
+                              'delete_flows')) as (reclaim_vlan_fn,
+                                                   update_dev_down_fn,
+                                                   delete_flows_int_fn):
+            self.agent.treat_devices_removed([self._compute_port.vif_id])
+            self.assertTrue(delete_flows_int_fn.called)
+
+    def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10):
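+        """Remove a previously bound csnat port.
+
+        The test checks that removing the SNAT port deletes the related
+        integration-bridge flows.
+        """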
+        self._setup_for_dvr_test()
+        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                        'set_db_attribute',
+                        return_value=True):
+            with contextlib.nested(
+                mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
+                           'db_get_val',
+                           return_value=str(self._old_local_vlan)),
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
+                    return_value={'gateway_ip': '1.1.1.1',
+                                  'cidr': '1.1.1.0/24',
+                                  'gateway_mac': 'aa:bb:cc:11:22:33'}),
+                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
+                                  'get_compute_ports_on_host_by_subnet',
+                                  return_value=[]),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'get_vif_port_by_id',
+                                  return_value=self._port),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
+            ) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
+                  get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
+                  add_flow_tun_fn, delete_flows_tun_fn):
+                self.agent.port_bound(
+                    self._port, self._net_uuid, 'vxlan',
+                    None, None, self._fixed_ips,
+                    n_const.DEVICE_OWNER_ROUTER_SNAT,
+                    False)
+                self.assertTrue(add_flow_int_fn.called)
+                self.assertTrue(delete_flows_int_fn.called)
+
+        with contextlib.nested(
+            mock.patch.object(self.agent, 'reclaim_local_vlan'),
+            mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
+                              return_value=None),
+            mock.patch.object(self.agent.dvr_agent.int_br,
+                              'delete_flows')) as (reclaim_vlan_fn,
+                                                   update_dev_down_fn,
+                                                   delete_flows_int_fn):
+            self.agent.treat_devices_removed([self._port.vif_id])
+            self.assertTrue(delete_flows_int_fn.called)
+
+    def test_setup_dvr_flows_on_int_br(self):
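+        """Smoke test for setup_dvr_flows_on_integ_tun_br.
+
+        With the DVR MAC address RPCs mocked out, the call is only expected
+        to complete without raising; no flow contents are asserted here.
+        """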
+        self._setup_for_dvr_test()
+        with contextlib.nested(
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc,
+                    'get_dvr_mac_address_by_host',
+                    return_value={'host': 'cn1',
+                                  'mac_address': 'aa:bb:cc:dd:ee:ff'}),
+                mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+                mock.patch.object(self.agent.dvr_agent.int_br,
+                                  'remove_all_flows'),
+                mock.patch.object(
+                    self.agent.dvr_agent.plugin_rpc,
+                    'get_dvr_mac_address_list',
+                    return_value=[{'host': 'cn1',
+                                   'mac_address': 'aa:bb:cc:dd:ee:ff'},
+                                  {'host': 'cn2',
+                                   'mac_address': '11:22:33:44:55:66'}])
+        ) as (get_mac_by_host_fn, add_flow_int_fn, add_flow_tun_fn,
+              remove_all_flows_fn, get_mac_list_fn):
+            self.agent.dvr_agent.setup_dvr_flows_on_integ_tun_br()
+
     def _test_port_dead(self, cur_tag=None):
         port = mock.Mock()
         port.ofport = 1
@@ -372,7 +692,12 @@ class TestOvsNeutronAgent(base.BaseTestCase):
                              'network_id': 'yyy',
                              'physical_network': 'foo',
                              'segmentation_id': 'bar',
-                             'network_type': 'baz'}
+                             'network_type': 'baz',
+                             'fixed_ips': [{'subnet_id': 'my-subnet-uuid',
+                                            'ip_address': '1.1.1.1'}],
+                             'device_owner': 'compute:None'}
         with contextlib.nested(
             mock.patch.object(self.agent.plugin_rpc,
                               'get_devices_details_list',
@@ -582,6 +907,36 @@ class TestOvsNeutronAgent(base.BaseTestCase):
             self.assertNotEqual(self.agent.get_peer_name('int-', bridge1),
                                 self.agent.get_peer_name('int-', bridge2))
 
+    def test_setup_tunnel_br(self):
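+        """setup_tunnel_br should create the int-to-tun patch port."""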
+        self.agent.tun_br = mock.Mock()
+        with contextlib.nested(
+            mock.patch.object(self.agent.int_br, "add_patch_port",
+                              return_value=1),
+            mock.patch.object(self.agent.tun_br, "add_patch_port",
+                              return_value=2),
+            mock.patch.object(self.agent.tun_br, "remove_all_flows"),
+            mock.patch.object(self.agent.tun_br, "add_flow"),
+            mock.patch.object(ovs_lib, "OVSBridge"),
+            mock.patch.object(self.agent.tun_br, "reset_bridge"),
+            mock.patch.object(sys, "exit")
+        ) as (intbr_patch_fn, tunbr_patch_fn, remove_all_fn,
+              add_flow_fn, ovs_br_fn, reset_br_fn, exit_fn):
+            self.agent.setup_tunnel_br(None)
+            self.assertTrue(intbr_patch_fn.called)
+
+    def test_setup_tunnel_port(self):
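+        """setup_tunnel_port should add a vxlan port on the tunnel bridge."""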
+        self.agent.tun_br = mock.Mock()
+        self.agent.l2_pop = False
+        self.agent.vxlan_udp_port = 8472
+        self.agent.tun_br_ofports['vxlan'] = {}
+        with contextlib.nested(
+            mock.patch.object(self.agent.tun_br, "add_tunnel_port",
+                              return_value='6'),
+            mock.patch.object(self.agent.tun_br, "add_flow")
+        ) as (add_tun_port_fn, add_flow_fn):
+            self.agent.setup_tunnel_port('portname', '1.2.3.4', 'vxlan')
+            self.assertTrue(add_tun_port_fn.called)
+
     def test_port_unbound(self):
         with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn:
             self.agent.enable_tunneling = True
@@ -808,6 +1163,35 @@ class TestOvsNeutronAgent(base.BaseTestCase):
             self.agent.reclaim_local_vlan('net2')
             del_port_fn.assert_called_once_with('gre-02020202')
 
+    def test_dvr_mac_address_update(self):
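+        """DVR MAC updates are reflected on the tunnel bridge.
+
+        A new remote DVR MAC should install a flow in the DVR_NOT_LEARN
+        table; a subsequent update with an empty MAC list should delete
+        that flow again.
+        """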
+        self._setup_for_dvr_test()
+        with contextlib.nested(
+            mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+            mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
+            mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
+        ) as (add_flow_fn, add_flow_tn_fn, del_flows_fn):
+            self.agent.dvr_agent.dvr_mac_address_update(
+                    dvr_macs=[{'host': 'cn2',
+                               'mac_address': 'aa:bb:cc:dd:ee:ff'}])
+            add_flow_tn_fn.assert_called_with(
+                table=constants.DVR_NOT_LEARN,
+                priority=1,
+                dl_src='aa:bb:cc:dd:ee:ff',
+                actions="output:%s" % self.agent.patch_int_ofport)
+            self.assertFalse(del_flows_fn.called)
+        with contextlib.nested(
+            mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
+            mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows'),
+            mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows')
+        ) as (add_flow_fn, del_flows_tn_fn, del_flows_fn):
+            self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[])
+            del_flows_tn_fn.assert_called_with(table=constants.DVR_NOT_LEARN,
+                                               dl_src='aa:bb:cc:dd:ee:ff')
+            self.assertFalse(add_flow_fn.called)
+
     def test_daemon_loop_uses_polling_manager(self):
         with mock.patch(
             'neutron.agent.linux.polling.get_polling_manager') as mock_get_pm:
diff --git a/neutron/tests/unit/openvswitch/test_ovs_tunnel.py b/neutron/tests/unit/openvswitch/test_ovs_tunnel.py
index 32f37f092a13d8776311a849742d0d9e08aeb909..095f761c426032b67e2d363cb6e71edb44e30053 100644 (file)
@@ -44,6 +44,9 @@ LVM_FLAT = ovs_neutron_agent.LocalVLANMapping(
     LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
 LVM_VLAN = ovs_neutron_agent.LocalVLANMapping(
     LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
+FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
+              'ip_address': '1.1.1.1'}]
+VM_DEVICE_OWNER = "compute:None"
 
 TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
 
@@ -199,9 +202,9 @@ class TunnelTest(base.BaseTestCase):
         self.mock_tun_bridge_expected += [
             mock.call.remove_all_flows(),
             mock.call.add_flow(priority=1,
-                               in_port=self.INT_OFPORT,
                                actions="resubmit(,%s)" %
-                               constants.PATCH_LV_TO_TUN),
+                               constants.PATCH_LV_TO_TUN,
+                               in_port=self.INT_OFPORT),
             mock.call.add_flow(priority=0, actions="drop"),
             mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
                                dl_dst=UCAST_MAC,
@@ -450,7 +453,9 @@ class TunnelTest(base.BaseTestCase):
 
         a = self._build_agent()
         a.local_vlan_map[NET_UUID] = LVM
-        a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, False)
+        a.local_dvr_map = {}
+        a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID,
+                     FIXED_IPS, VM_DEVICE_OWNER, False)
         self._verify_mock_calls()
 
     def test_port_unbound(self):
@@ -486,7 +491,7 @@ class TunnelTest(base.BaseTestCase):
             mock.call.add_tunnel_port('gre-1', '10.0.10.1', '10.0.0.1',
                                       'gre', 4789, True),
             mock.call.add_flow(priority=1, in_port=tunnel_port,
-                               actions='resubmit(,2)')
+                               actions='resubmit(,3)')
         ]
 
         a = self._build_agent()