From: Salvatore Orlando
Date: Thu, 19 Jun 2014 11:01:03 +0000 (-0700)
Subject: NSX: propagate network name updates to backend
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=7b10ab2a54fce59cdc5eb86dfcec7db3364a20d8;p=openstack-build%2Fneutron-build.git

NSX: propagate network name updates to backend

This patch updates the logical switch name on the NSX backend
if a new network name is provided in the update request.

This patch also fixes tag management for update_lswitch in
neutron.plugins.vmware.nsxlib.switch and adds relevant unit tests.

Closes-Bug: 1276128
Change-Id: Ic8921c059012a875006701ac10d2dcae97f30253
---

diff --git a/neutron/plugins/vmware/nsxlib/switch.py b/neutron/plugins/vmware/nsxlib/switch.py
index f84e2f584..f1b84a615 100644
--- a/neutron/plugins/vmware/nsxlib/switch.py
+++ b/neutron/plugins/vmware/nsxlib/switch.py
@@ -135,10 +135,15 @@ def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
 def update_lswitch(cluster, lswitch_id, display_name,
                    tenant_id=None, **kwargs):
     uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
-    lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
-                   "tags": utils.get_tags(os_tid=tenant_id)}
-    if "tags" in kwargs:
-        lswitch_obj["tags"].extend(kwargs["tags"])
+    lswitch_obj = {"display_name": utils.check_and_truncate(display_name)}
+    # NOTE: tag update will not 'merge' existing tags with new ones.
+    tags = []
+    if tenant_id:
+        tags = utils.get_tags(os_tid=tenant_id)
+    # The 'tags' kwarg might be present and set to None
+    tags.extend(kwargs.get('tags') or [])
+    if tags:
+        lswitch_obj['tags'] = tags
     try:
         return nsxlib.do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
                                  cluster=cluster)

diff --git a/neutron/plugins/vmware/plugins/base.py b/neutron/plugins/vmware/plugins/base.py
index 4870983b0..384964c8f 100644
--- a/neutron/plugins/vmware/plugins/base.py
+++ b/neutron/plugins/vmware/plugins/base.py
@@ -1125,6 +1125,28 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._process_network_queue_mapping(context, net, net_queue_id)
             self._process_l3_update(context, net, network['network'])
             self._extend_network_dict_provider(context, net)
+            # If provided, update the network name on the backend; treat
+            # backend failures as non-critical (log an error, do not raise)
+            if 'name' in network['network']:
+                # in case of chained switches update name only for the first one
+                nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
+                    context.session, self.cluster, id)
+                if not nsx_switch_ids:
+                    LOG.warn(_("Unable to find NSX mappings for neutron "
+                               "network:%s"), id)
+                else:
+                    try:
+                        switchlib.update_lswitch(self.cluster,
+                                                 nsx_switch_ids[0],
+                                                 network['network']['name'])
+                    except api_exc.NsxApiException as e:
+                        LOG.warn(_("Logical switch update on NSX backend "
" + "Neutron network id:%(net_id)s; " + "NSX lswitch id:%(lswitch_id)s;" + "Error:%(error)s"), + {'net_id': id, 'lswitch_id': nsx_switch_ids[0], + 'error': e}) + return net def create_port(self, context, port): diff --git a/neutron/tests/unit/vmware/nsxlib/test_switch.py b/neutron/tests/unit/vmware/nsxlib/test_switch.py index 987360c91..db8c5af98 100644 --- a/neutron/tests/unit/vmware/nsxlib/test_switch.py +++ b/neutron/tests/unit/vmware/nsxlib/test_switch.py @@ -94,9 +94,7 @@ class LogicalSwitchesTestCase(base.NsxlibTestCase): self.assertEqual(second_ls_tags['quantum_net_id'], network_id) - def test_update_lswitch(self): - new_name = 'new-name' - new_tags = [{'scope': 'new_tag', 'tag': 'xxx'}] + def _test_update_lswitch(self, tenant_id, name, tags): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, @@ -105,14 +103,28 @@ class LogicalSwitchesTestCase(base.NsxlibTestCase): 'fake-switch', transport_zones_config) switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'], - new_name, tags=new_tags) + name, tenant_id=tenant_id, tags=tags) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) - self.assertEqual(res_lswitch[0]['display_name'], new_name) + self.assertEqual(res_lswitch[0]['display_name'], name) + if not tags: + # no need to validate tags + return switch_tags = self._build_tag_dict(res_lswitch[0]['tags']) - self.assertIn('new_tag', switch_tags) - self.assertEqual(switch_tags['new_tag'], 'xxx') + for tag in tags: + self.assertIn(tag['scope'], switch_tags) + self.assertEqual(tag['tag'], switch_tags[tag['scope']]) + + def test_update_lswitch(self): + self._test_update_lswitch(None, 'new-name', + [{'scope': 'new_tag', 'tag': 'xxx'}]) + + def test_update_lswitch_no_tags(self): + self._test_update_lswitch(None, 'new-name', None) + + def test_update_lswitch_tenant_id(self): + self._test_update_lswitch('whatever', 'new-name', None) def test_update_non_existing_lswitch_raises(self): self.assertRaises(exceptions.NetworkNotFound, diff --git a/neutron/tests/unit/vmware/test_nsx_plugin.py b/neutron/tests/unit/vmware/test_nsx_plugin.py index 4161a0b2e..4b99bd734 100644 --- a/neutron/tests/unit/vmware/test_nsx_plugin.py +++ b/neutron/tests/unit/vmware/test_nsx_plugin.py @@ -338,6 +338,23 @@ class TestNetworksV2(test_plugin.TestNetworksV2, NsxPluginV2TestCase): context.get_admin_context(), net['network']['id'], data) + def test_update_network_with_name_calls_nsx(self): + with mock.patch.object( + nsxlib.switch, 'update_lswitch') as update_lswitch_mock: + # don't worry about deleting this network, do not use + # context manager + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + net = plugin.create_network( + ctx, {'network': {'name': 'xxx', + 'admin_state_up': True, + 'shared': False, + 'port_security_enabled': True}}) + plugin.update_network(ctx, net['id'], + {'network': {'name': 'yyy'}}) + update_lswitch_mock.assert_called_once_with( + mock.ANY, mock.ANY, 'yyy') + class SecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):