]> review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
NSX: remove unnecessary checks on network delete
authorSalvatore Orlando <salv.orlando@gmail.com>
Tue, 8 Jul 2014 23:16:39 +0000 (16:16 -0700)
committerSalvatore Orlando <salv.orlando@gmail.com>
Tue, 8 Jul 2014 23:16:39 +0000 (16:16 -0700)
Since commit b50e66f router interfaces are not deleted
automatically when the network hosting them is deleted.

The NSX plugin still has additional logic to ensure all the
corresponding backend resources have been deleted as well.
Such logic is now obsolete and can be removed. Moreover, it is
also wasteful, as it performs queries for retrieving NSX
resource identifiers.

This patch also amends a few related log statements to make
them clearer.

Change-Id: If3d8d98096a8363b216707f2558e37c80f58e515
Closes-Bug: #1339401

neutron/plugins/vmware/plugins/base.py

index 3ffb4ed9d8bbf019b21a14853a6aac251046d788..db818b798bc111af1c0538aac99a78ccbc275d1f 100644 (file)
@@ -1027,14 +1027,6 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
     def delete_network(self, context, id):
         external = self._network_is_external(context, id)
-        # Before deleting ports, ensure the peer of a NSX logical
-        # port with a patch attachment is removed too
-        port_filter = {'network_id': [id],
-                       'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]}
-        router_iface_ports = self.get_ports(context, filters=port_filter)
-        for port in router_iface_ports:
-            nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
-                context.session, self.cluster, id)
         # Before removing entry from Neutron DB, retrieve NSX switch
         # identifiers for removing them from backend
         if not external:
@@ -1044,41 +1036,15 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._process_l3_delete(context, id)
             super(NsxPluginV2, self).delete_network(context, id)
 
-        # clean up network owned ports
-        for port in router_iface_ports:
-            try:
-                if nsx_port_id:
-                    nsx_router_id = nsx_utils.get_nsx_router_id(
-                        context.session, self.cluster, port['device_id'])
-                    routerlib.delete_peer_router_lport(self.cluster,
-                                                       nsx_router_id,
-                                                       nsx_switch_id,
-                                                       nsx_port_id)
-                else:
-                    LOG.warning(_("A nsx lport identifier was not found for "
-                                  "neutron port '%s'. Unable to remove "
-                                  "the peer router port for this switch port"),
-                                port['id'])
-
-            except (TypeError, KeyError,
-                    api_exc.NsxApiException,
-                    api_exc.ResourceNotFound):
-                # Do not raise because the issue might as well be that the
-                # router has already been deleted, so there would be nothing
-                # to do here
-                LOG.warning(_("Ignoring exception as this means the peer for "
-                              "port '%s' has already been deleted."),
-                            nsx_port_id)
-
         # Do not go to NSX for external networks
         if not external:
             try:
                 switchlib.delete_networks(self.cluster, id, lswitch_ids)
-                LOG.debug(_("delete_network completed for tenant: %s"),
-                          context.tenant_id)
             except n_exc.NotFound:
-                LOG.warning(_("Did not found lswitch %s in NSX"), id)
+                LOG.warning(_("The following logical switches were not found "
+                              "on the NSX backend:%s"), lswitch_ids)
         self.handle_network_dhcp_access(context, id, action='delete_network')
+        LOG.debug("Delete network complete for network: %s", id)
 
     def get_network(self, context, id, fields=None):
         with context.session.begin(subtransactions=True):