Ensure port get works when NVP mapping not stored in Quantum DB
author    Salvatore Orlando <salv.orlando@gmail.com>
Mon, 11 Mar 2013 12:55:51 +0000 (13:55 +0100)
committer Salvatore Orlando <salv.orlando@gmail.com>
Wed, 13 Mar 2013 02:06:49 +0000 (03:06 +0100)
Bug 1153616

If the entry mapping a Quantum port identifier to an NVP port
identifier is not found in the Quantum DB, search for the port on NVP
and, if found, add the mapping entry.
This ensures databases upgraded from Folsom keep working with Grizzly
code.

Change-Id: I74943e8271f522dcd21c1c34b0159dd61bb66e76
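
In short, the new lookup falls back from the Quantum DB to an NVP query and
then persists the recovered mapping. Below is a minimal sketch of that flow,
reusing the nicira_db and nvplib helpers touched by this change (signatures
as in the diff); lookup_nvp_port_id itself and the import paths are
illustrative assumptions, not code from this commit.

    # Sketch only; the real implementation is NvpPluginV2._nvp_get_port_id below.
    # Import paths are assumed to match the plugin layout in this change.
    from quantum.plugins.nicira.nicira_nvp_plugin import nicira_db
    from quantum.plugins.nicira.nicira_nvp_plugin import nvplib

    def lookup_nvp_port_id(session, cluster, quantum_port):
        # Fast path: the Quantum DB already holds the quantum->NVP port mapping.
        nvp_port_id = nicira_db.get_nvp_port_id(session, quantum_port['id'])
        if nvp_port_id:
            return nvp_port_id
        # Mapping missing (e.g. a Folsom database upgraded to Grizzly): query NVP
        # for the logical port carrying the matching q_port_id tag.
        nvp_port = nvplib.get_port_by_quantum_tag(
            cluster, quantum_port['network_id'], quantum_port['id'])
        if nvp_port:
            # Persist the recovered mapping so later lookups stay in the DB.
            nicira_db.add_quantum_nvp_port_mapping(
                session, quantum_port['id'], nvp_port['uuid'])
            return nvp_port['uuid']
        # Not found on NVP either; callers decide (skip delete, mark ERROR, raise).
        return None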

quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py
quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py
quantum/tests/unit/nicira/test_nvplib.py

quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py
index c4cd06baab0fb153360ae7b74d3d39d96c5f2736..c5a2780bd7f45dcc87955fd8a606fd493c98d60a 100644 (file)
@@ -508,17 +508,18 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                         "external networks. Port %s will be down."),
                       port_data['network_id'])
             return
-
-        port = nicira_db.get_nvp_port_id(context.session, port_data['id'])
-        if port is None:
-            raise q_exc.PortNotFound(port_id=port_data['id'])
+        nvp_port_id = self._nvp_get_port_id(context, self.default_cluster,
+                                            port_data)
+        if not nvp_port_id:
+            LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
+            return
         # TODO(bgh): if this is a bridged network and the lswitch we just got
         # back will have zero ports after the delete we should garbage collect
         # the lswitch.
         try:
             nvplib.delete_port(self.default_cluster,
                                port_data['network_id'],
-                               port)
+                               nvp_port_id)
             LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
                         "on network %(net_id)s"),
                       {'port_id': port_data['id'],
@@ -530,8 +531,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
     def _nvp_delete_router_port(self, context, port_data):
         # Delete logical router port
         lrouter_id = port_data['device_id']
-        nvp_port_id = nicira_db.get_nvp_port_id(context.session,
-                                                port_data['id'])
+        nvp_port_id = self._nvp_get_port_id(context, self.default_cluster,
+                                            port_data)
         if not nvp_port_id:
             raise q_exc.PortNotFound(port_id=port_data['id'])
 
@@ -738,6 +739,31 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # this is a no-op driver
         pass
 
+    def _nvp_get_port_id(self, context, cluster, quantum_port):
+        """ Return the NVP port uuid for a given quantum port.
+        First, look up the Quantum database. If not found, execute
+        a query on NVP platform as the mapping might be missing because
+        the port was created before upgrading to grizzly. """
+        nvp_port_id = nicira_db.get_nvp_port_id(context.session,
+                                                quantum_port['id'])
+        if nvp_port_id:
+            return nvp_port_id
+        # Perform a query to NVP and then update the DB
+        try:
+            nvp_port = nvplib.get_port_by_quantum_tag(
+                cluster,
+                quantum_port['network_id'],
+                quantum_port['id'])
+            if nvp_port:
+                nicira_db.add_quantum_nvp_port_mapping(
+                    context.session,
+                    quantum_port['id'],
+                    nvp_port['uuid'])
+                return nvp_port['uuid']
+        except:
+            LOG.exception(_("Unable to find NVP uuid for Quantum port %s"),
+                          quantum_port['id'])
+
     def _extend_fault_map(self):
         """ Extends the Quantum Fault Map
 
@@ -969,8 +995,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                        'device_owner': ['network:router_interface']}
         router_iface_ports = self.get_ports(context, filters=port_filter)
         for port in router_iface_ports:
-            nvp_port_id = nicira_db.get_nvp_port_id(context.session,
-                                                    port['id'])
+            nvp_port_id = self._nvp_get_port_id(
+                context, self.default_cluster, port)
             if nvp_port_id:
                 port['nvp_port_id'] = nvp_port_id
             else:
@@ -1408,7 +1434,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             self._extend_port_port_security_dict(context, ret_port)
             self._extend_port_dict_security_group(context, ret_port)
             LOG.debug(_("Update port request: %s"), port)
-            nvp_port_id = nicira_db.get_nvp_port_id(context.session, id)
+            nvp_port_id = self._nvp_get_port_id(
+                context, self.default_cluster, ret_port)
             nvplib.update_port(self.default_cluster,
                                ret_port['network_id'],
                                nvp_port_id, id, tenant_id,
@@ -1483,20 +1510,26 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             if self._network_is_external(context,
                                          quantum_db_port['network_id']):
                 return quantum_db_port
-
-            nvp_id = nicira_db.get_nvp_port_id(context.session, id)
-            #TODO: pass the appropriate cluster here
-            try:
-                port = nvplib.get_logical_port_status(
-                    self.default_cluster, quantum_db_port['network_id'],
-                    nvp_id)
-                quantum_db_port["admin_state_up"] = (
-                    port["admin_status_enabled"])
-                if port["fabric_status_up"]:
-                    quantum_db_port["status"] = constants.PORT_STATUS_ACTIVE
-                else:
-                    quantum_db_port["status"] = constants.PORT_STATUS_DOWN
-            except q_exc.NotFound:
+            nvp_id = self._nvp_get_port_id(context, self.default_cluster,
+                                           quantum_db_port)
+            # If there's no nvp IP do not bother going to NVP and put
+            # the port in error state
+            if nvp_id:
+                #TODO: pass the appropriate cluster here
+                try:
+                    port = nvplib.get_logical_port_status(
+                        self.default_cluster, quantum_db_port['network_id'],
+                        nvp_id)
+                    quantum_db_port["admin_state_up"] = (
+                        port["admin_status_enabled"])
+                    if port["fabric_status_up"]:
+                        quantum_db_port["status"] = (
+                            constants.PORT_STATUS_ACTIVE)
+                    else:
+                        quantum_db_port["status"] = constants.PORT_STATUS_DOWN
+                except q_exc.NotFound:
+                    quantum_db_port["status"] = constants.PORT_STATUS_ERROR
+            else:
                 quantum_db_port["status"] = constants.PORT_STATUS_ERROR
         return quantum_db_port
 
quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py
index 3308db3fd88e7ba038b0562fcafb408d76fd3837..c07a7bc2d197dc9b7520a2427e7191f624ccedcf 100644 (file)
@@ -677,6 +677,36 @@ def get_port_by_display_name(clusters, lswitch, display_name):
     raise exception.PortNotFound(port_id=display_name, net_id=lswitch)
 
 
+def get_port_by_quantum_tag(cluster, lswitch_uuid, quantum_port_id):
+    """Return the NVP UUID of the logical port with tag q_port_id
+    equal to quantum_port_id or None if the port is not Found.
+    """
+    uri = _build_uri_path(LSWITCHPORT_RESOURCE,
+                          parent_resource_id=lswitch_uuid,
+                          fields='uuid',
+                          filters={'tag': quantum_port_id,
+                                   'tag_scope': 'q_port_id'})
+    LOG.debug(_("Looking for port with q_port_id tag '%(quantum_port_id)s' "
+                "on: '%(lswitch_uuid)s'") %
+              {'quantum_port_id': quantum_port_id,
+               'lswitch_uuid': lswitch_uuid})
+    try:
+        res_obj = do_single_request(HTTP_GET, uri, cluster=cluster)
+    except Exception:
+        LOG.exception(_("An exception occurred while querying NVP ports"))
+        raise
+    res = json.loads(res_obj)
+    num_results = len(res["results"])
+    if num_results >= 1:
+        if num_results > 1:
+            LOG.warn(_("Found '%(num_ports)d' ports with "
+                       "q_port_id tag: '%(quantum_port_id)s'. "
+                       "Only 1 was expected.") %
+                     {'num_ports': num_results,
+                      'quantum_port_id': quantum_port_id})
+        return res["results"][0]
+
+
 def get_port(cluster, network, port, relations=None):
     LOG.info(_("get_port() %(network)s %(port)s"), locals())
     uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
quantum/tests/unit/nicira/test_nvplib.py
index 3e41456a1245ceb847c8101cc880b38f2e2b998e..a0a1f9de1a61ab1563db20ec374bb63ee3eee58c 100644 (file)
@@ -177,3 +177,30 @@ class NvplibL2GatewayTestCase(NvplibTestCase):
         self.assertIn('LogicalPortAttachment', resp_obj)
         self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
                          'L2GatewayAttachment')
+
+
+class TestNvpLibLogicalPorts(NvplibTestCase):
+
+    def test_get_port_by_tag(self):
+        tenant_id = 'pippo'
+        quantum_port_id = 'whatever'
+        lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
+                                        'fake-switch')
+        lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
+                                    tenant_id, quantum_port_id,
+                                    'name', 'device_id', True)
+        lport2 = nvplib.get_port_by_quantum_tag(self.fake_cluster,
+                                                lswitch['uuid'],
+                                                quantum_port_id)
+        self.assertIsNotNone(lport2)
+        self.assertEqual(lport['uuid'], lport2['uuid'])
+
+    def test_get_port_by_tag_not_found_returns_None(self):
+        tenant_id = 'pippo'
+        quantum_port_id = 'whatever'
+        lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
+                                        'fake-switch')
+        lport = nvplib.get_port_by_quantum_tag(self.fake_cluster,
+                                               lswitch['uuid'],
+                                               quantum_port_id)
+        self.assertIsNone(lport)