From ec5c84d1219d778b83a0fa61a46926b21c2b2053 Mon Sep 17 00:00:00 2001
From: Vipin Balachandran
Date: Sat, 10 Jan 2015 15:26:32 +0530
Subject: [PATCH] VMware: Create volume backing in specific clusters

Unlike the VMware Nova driver, the VMDK driver doesn't allow
specifying a set of vCenter clusters as the target for volume backing
creation. This patch adds support for an optional list of vCenter
cluster names in the Cinder configuration, which the VMDK driver will
use as targets for volume backing creation.

DocImpact
Added a new config option 'vmware_cluster_name' which specifies a
vCenter compute cluster where volumes should be created. The option
can be repeated to configure multiple clusters.

Change-Id: I0dcb3a8ac7c9eaa0d0697f4967873d82bf1bbddf
---
 cinder/tests/unit/test_vmware_datastore.py |   8 ++
 cinder/tests/unit/test_vmware_vmdk.py      | 140 ++++++++++++++++++++-
 cinder/tests/unit/test_vmware_volumeops.py |  52 ++++++++
 cinder/volume/drivers/vmware/datastore.py  |   2 +-
 cinder/volume/drivers/vmware/exceptions.py |  10 ++
 cinder/volume/drivers/vmware/vmdk.py       |  39 ++++--
 cinder/volume/drivers/vmware/volumeops.py  |  46 +++++++
 7 files changed, 283 insertions(+), 14 deletions(-)

diff --git a/cinder/tests/unit/test_vmware_datastore.py b/cinder/tests/unit/test_vmware_datastore.py
index 65542180e..d29eed610 100644
--- a/cinder/tests/unit/test_vmware_datastore.py
+++ b/cinder/tests/unit/test_vmware_datastore.py
@@ -383,6 +383,14 @@ class DatastoreTest(test.TestCase):
         self._vops.get_connected_hosts.reset_mock()
         self._vops.get_connected_hosts.return_value = None
 
+    def test_select_datastore_with_empty_host_list(self):
+        size_bytes = units.Ki
+        req = {self._ds_sel.SIZE_BYTES: size_bytes}
+        self._vops.get_hosts.return_value = mock.Mock(objects=[])
+
+        self.assertEqual((), self._ds_sel.select_datastore(req, hosts=[]))
+        self._vops.get_hosts.assert_called_once_with()
+
     @mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
     @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                 '_filter_by_profile')
diff --git a/cinder/tests/unit/test_vmware_vmdk.py b/cinder/tests/unit/test_vmware_vmdk.py
index 4c005439b..bd14c0c64 100644
--- a/cinder/tests/unit/test_vmware_vmdk.py
+++ b/cinder/tests/unit/test_vmware_vmdk.py
@@ -149,6 +149,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
     TMP_DIR = "/vmware-tmp"
     CA_FILE = "/etc/ssl/rui-ca-cert.pem"
     VMDK_DRIVER = vmdk.VMwareEsxVmdkDriver
+    CLUSTERS = ["cls-1", "cls-2"]
 
     def setUp(self):
         super(VMwareEsxVmdkDriverTestCase, self).setUp()
@@ -166,10 +167,11 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         self._config.vmware_tmp_dir = self.TMP_DIR
         self._config.vmware_ca_file = self.CA_FILE
         self._config.vmware_insecure = False
+        self._config.vmware_cluster_name = self.CLUSTERS
         self._db = mock.Mock()
         self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config,
                                                 db=self._db)
-        api_retry_count = self._config.vmware_api_retry_count,
+        api_retry_count = self._config.vmware_api_retry_count
         task_poll_interval = self._config.vmware_task_poll_interval,
         self._session = api.VMwareAPISession(self.IP, self.USERNAME,
                                              self.PASSWORD, api_retry_count,
@@ -1773,21 +1775,37 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
         version = self._driver._get_vc_version()
         self.assertEqual(ver.LooseVersion('6.0.1'), version)
 
+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
     @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                 '_get_vc_version')
     @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                 'session', new_callable=mock.PropertyMock)
-    def test_do_setup_with_pbm_disabled(self, session, get_vc_version):
+    def test_do_setup_with_pbm_disabled(self, session, get_vc_version,
+                                        vops_cls):
         session_obj = mock.Mock(name='session')
         session.return_value = session_obj
         get_vc_version.return_value = ver.LooseVersion('5.0')
 
+        cluster_refs = mock.Mock()
+        cluster_refs.values.return_value = mock.sentinel.cluster_refs
+        vops = mock.Mock()
+        vops.get_cluster_refs.return_value = cluster_refs
+
+        def vops_side_effect(session, max_objects):
+            vops._session = session
+            vops._max_objects = max_objects
+            return vops
+
+        vops_cls.side_effect = vops_side_effect
+
         self._driver.do_setup(mock.ANY)
 
         self.assertFalse(self._driver._storage_policy_enabled)
         get_vc_version.assert_called_once_with()
         self.assertEqual(session_obj, self._driver.volumeops._session)
         self.assertEqual(session_obj, self._driver.ds_sel._session)
+        self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
+        vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)
 
     @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
     @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
@@ -1807,12 +1825,14 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
         get_pbm_wsdl_location.assert_called_once_with(
             six.text_type(vc_version))
 
+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
     @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
     @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                 '_get_vc_version')
     @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                 'session', new_callable=mock.PropertyMock)
-    def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location):
+    def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location,
+                      vops_cls):
         session_obj = mock.Mock(name='session')
         session.return_value = session_obj
 
@@ -1820,6 +1840,18 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
         get_vc_version.return_value = vc_version
         get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
 
+        cluster_refs = mock.Mock()
+        cluster_refs.values.return_value = mock.sentinel.cluster_refs
+        vops = mock.Mock()
+        vops.get_cluster_refs.return_value = cluster_refs
+
+        def vops_side_effect(session, max_objects):
+            vops._session = session
+            vops._max_objects = max_objects
+            return vops
+
+        vops_cls.side_effect = vops_side_effect
+
         self._driver.do_setup(mock.ANY)
 
         self.assertTrue(self._driver._storage_policy_enabled)
@@ -1828,6 +1860,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
             six.text_type(vc_version))
         self.assertEqual(session_obj, self._driver.volumeops._session)
         self.assertEqual(session_obj, self._driver.ds_sel._session)
+        self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
+        vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)
 
     @mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
     @mock.patch.object(VMDK_DRIVER, '_create_backing')
@@ -2578,6 +2612,106 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
         close.assert_called_once_with(fd)
         delete_if_exists.assert_called_once_with(tmp)
 
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_select_datastore(self, ds_sel, vops):
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        self._driver._clusters = [cls_1, cls_2]
+
+        host_1 = mock.sentinel.host_1
+        host_2 = mock.sentinel.host_2
+        host_3 = mock.sentinel.host_3
+        vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]]
+
+        best_candidate = mock.sentinel.best_candidate
+        ds_sel.select_datastore.return_value = best_candidate
+
+        req = mock.sentinel.req
+        self.assertEqual(best_candidate, self._driver._select_datastore(req))
+
+        exp_calls = [mock.call(cls_1), mock.call(cls_2)]
+        self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list)
+
+        ds_sel.select_datastore.assert_called_once_with(
+            req, hosts=[host_1, host_2, host_3])
+
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_select_datastore_with_no_best_candidate(self, ds_sel, vops):
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        self._driver._clusters = [cls_1, cls_2]
+
+        host_1 = mock.sentinel.host_1
+        host_2 = mock.sentinel.host_2
+        host_3 = mock.sentinel.host_3
+        vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]]
+
+        ds_sel.select_datastore.return_value = ()
+
+        req = mock.sentinel.req
+        self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
+                          self._driver._select_datastore,
+                          req)
+
+        exp_calls = [mock.call(cls_1), mock.call(cls_2)]
+        self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list)
+
+        ds_sel.select_datastore.assert_called_once_with(
+            req, hosts=[host_1, host_2, host_3])
+
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_select_datastore_with_single_host(self, ds_sel, vops):
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        self._driver._clusters = [cls_1, cls_2]
+
+        host_1 = mock.sentinel.host_1
+
+        best_candidate = mock.sentinel.best_candidate
+        ds_sel.select_datastore.return_value = best_candidate
+
+        req = mock.sentinel.req
+        self.assertEqual(best_candidate,
+                         self._driver._select_datastore(req, host_1))
+
+        ds_sel.select_datastore.assert_called_once_with(req, hosts=[host_1])
+        self.assertFalse(vops.get_cluster_hosts.called)
+
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_select_datastore_with_empty_clusters(self, ds_sel, vops):
+        self._driver._clusters = None
+
+        best_candidate = mock.sentinel.best_candidate
+        ds_sel.select_datastore.return_value = best_candidate
+
+        req = mock.sentinel.req
+        self.assertEqual(best_candidate, self._driver._select_datastore(req))
+
+        ds_sel.select_datastore.assert_called_once_with(req, hosts=None)
+        self.assertFalse(vops.get_cluster_hosts.called)
+
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_select_datastore_with_no_valid_host(self, ds_sel, vops):
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        self._driver._clusters = [cls_1, cls_2]
+
+        vops.get_cluster_hosts.side_effect = [[], []]
+
+        req = mock.sentinel.req
+        self.assertRaises(vmdk_exceptions.NoValidHostException,
+                          self._driver._select_datastore, req)
+
+        exp_calls = [mock.call(cls_1), mock.call(cls_2)]
+        self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list)
+
+        self.assertFalse(ds_sel.called)
+
     @mock.patch.object(VMDK_DRIVER, 'volumeops')
     @mock.patch.object(VMDK_DRIVER, 'ds_sel')
     def test_relocate_backing_nop(self, ds_sel, vops):
diff --git a/cinder/tests/unit/test_vmware_volumeops.py b/cinder/tests/unit/test_vmware_volumeops.py
index a502de2a2..be37cb77f 100644
--- a/cinder/tests/unit/test_vmware_volumeops.py
+++ b/cinder/tests/unit/test_vmware_volumeops.py
@@ -1510,6 +1510,58 @@ class VolumeOpsTestCase(test.TestCase):
                                                          eagerZero=False)
         self.session.wait_for_task.assert_called_once_with(task)
 
+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+                '_get_all_clusters')
+    def test_get_cluster_refs(self, get_all_clusters):
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        clusters = {"cls_1": cls_1, "cls_2": cls_2}
+        get_all_clusters.return_value = clusters
+
+        self.assertEqual({"cls_2": cls_2},
+                         self.vops.get_cluster_refs(["cls_2"]))
+
+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+                '_get_all_clusters')
+    def test_get_cluster_refs_with_invalid_cluster(self, get_all_clusters):
+        cls_1 = mock.sentinel.cls_1
+        cls_2 = mock.sentinel.cls_2
+        clusters = {"cls_1": cls_1, "cls_2": cls_2}
+        get_all_clusters.return_value = clusters
+
+        self.assertRaises(vmdk_exceptions.ClusterNotFoundException,
+                          self.vops.get_cluster_refs,
+                          ["cls_1", "cls_3"])
+
+    def test_get_cluster_hosts(self):
+        host_1 = mock.sentinel.host_1
+        host_2 = mock.sentinel.host_2
+        hosts = mock.Mock(ManagedObjectReference=[host_1, host_2])
+        self.session.invoke_api.return_value = hosts
+
+        cluster = mock.sentinel.cluster
+        ret = self.vops.get_cluster_hosts(cluster)
+
+        self.assertEqual([host_1, host_2], ret)
+        self.session.invoke_api.assert_called_once_with(vim_util,
+                                                        'get_object_property',
+                                                        self.session.vim,
+                                                        cluster,
+                                                        'host')
+
+    def test_get_cluster_hosts_with_no_host(self):
+        self.session.invoke_api.return_value = None
+
+        cluster = mock.sentinel.cluster
+        ret = self.vops.get_cluster_hosts(cluster)
+
+        self.assertEqual([], ret)
+        self.session.invoke_api.assert_called_once_with(vim_util,
+                                                        'get_object_property',
+                                                        self.session.vim,
+                                                        cluster,
+                                                        'host')
+
 
 class VirtualDiskPathTest(test.TestCase):
     """Unit tests for VirtualDiskPath."""
diff --git a/cinder/volume/drivers/vmware/datastore.py b/cinder/volume/drivers/vmware/datastore.py
index 8bee6b137..512b3b5b2 100644
--- a/cinder/volume/drivers/vmware/datastore.py
+++ b/cinder/volume/drivers/vmware/datastore.py
@@ -205,7 +205,7 @@ class DatastoreSelector(object):
         if profile_name is not None:
             profile_id = self.get_profile_id(profile_name)
 
-        if hosts is None:
+        if not hosts:
             hosts = self._get_all_hosts()
 
         LOG.debug("Using hosts: %(hosts)s for datastore selection based on "
diff --git a/cinder/volume/drivers/vmware/exceptions.py b/cinder/volume/drivers/vmware/exceptions.py
index 6c44d90c8..2fa3209fa 100644
--- a/cinder/volume/drivers/vmware/exceptions.py
+++ b/cinder/volume/drivers/vmware/exceptions.py
@@ -45,3 +45,13 @@ class ProfileNotFoundException(exceptions.VMwareDriverException):
 class NoValidDatastoreException(exceptions.VMwareDriverException):
     """Thrown when there are no valid datastores."""
     message = _("There are no valid datastores.")
+
+
+class ClusterNotFoundException(exceptions.VMwareDriverException):
+    """Thrown when the given cluster cannot be found."""
+    message = _("Compute cluster: %(cluster)s not found.")
+
+
+class NoValidHostException(exceptions.VMwareDriverException):
+    """Thrown when there are no valid ESX hosts."""
+    message = _("There are no valid ESX hosts.")
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index b6040676b..c374af31b 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -117,6 +117,10 @@ vmdk_opts = [
                      'verified. If false, then the default CA truststore is '
                      'used for verification. This option is ignored if '
                      '"vmware_ca_file" is set.'),
+    cfg.MultiStrOpt('vmware_cluster_name',
+                    default=None,
+                    help='Name of a vCenter compute cluster where volumes '
+                         'should be created.'),
 ]
 
 CONF = cfg.CONF
@@ -226,6 +230,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         # directly to ESX
         self._storage_policy_enabled = False
         self._ds_sel = None
+        self._clusters = None
 
     @property
     def session(self):
@@ -461,13 +466,28 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
     def _relocate_backing(self, volume, backing, host):
         pass
 
+    def _get_hosts(self, clusters):
+        hosts = []
+        if clusters:
+            for cluster in clusters:
+                hosts.extend(self.volumeops.get_cluster_hosts(cluster))
+        return hosts
+
     def _select_datastore(self, req, host=None):
         """Selects datastore satisfying the given requirements.
 
         :return: (host, resource_pool, summary)
         """
+        hosts = None
+        if host:
+            hosts = [host]
+        elif self._clusters:
+            hosts = self._get_hosts(self._clusters)
+            if not hosts:
+                LOG.error(_LE("There are no valid hosts available in "
+                              "configured cluster(s): %s."), self._clusters)
+                raise vmdk_exceptions.NoValidHostException()
 
-        hosts = [host] if host else None
         best_candidate = self.ds_sel.select_datastore(req, hosts=hosts)
         if not best_candidate:
             LOG.error(_LE("There is no valid datastore satisfying "
@@ -1836,6 +1856,13 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects)
         self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session)
 
+        # Get clusters to be used for backing VM creation.
+        cluster_names = self.configuration.vmware_cluster_name
+        if cluster_names:
+            self._clusters = self.volumeops.get_cluster_refs(
+                cluster_names).values()
+            LOG.info(_LI("Using compute cluster(s): %s."), cluster_names)
+
         LOG.info(_LI("Successfully setup driver: %(driver)s for server: "
                      "%(ip)s."), {'driver': self.__class__.__name__,
                                   'ip': self.configuration.vmware_host_ip})
@@ -1889,15 +1916,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
             req[hub.DatastoreSelector.PROFILE_NAME] = backing_profile
 
         # Select datastore satisfying the requirements.
-        best_candidate = self.ds_sel.select_datastore(req, hosts=[host])
-        if not best_candidate:
-            # No candidate datastore to relocate.
-            msg = _("There are no datastores matching volume requirements;"
-                    " can't relocate volume: %s.") % volume['name']
-            LOG.error(msg)
-            raise vmdk_exceptions.NoValidDatastoreException(msg)
-
-        (host, resource_pool, summary) = best_candidate
+        (host, resource_pool, summary) = self._select_datastore(req, host)
         dc = self.volumeops.get_dc(resource_pool)
         folder = self._get_volume_group_folder(dc)
 
diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py
index a54d6ca1e..358cfd636 100644
--- a/cinder/volume/drivers/vmware/volumeops.py
+++ b/cinder/volume/drivers/vmware/volumeops.py
@@ -1413,3 +1413,49 @@ class VMwareVolumeOps(object):
                                             profile_manager,
                                             profileIds=profile_ids)
         return profiles[0].name
+
+    def _get_all_clusters(self):
+        clusters = {}
+        retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
+                                                   self._session.vim,
+                                                   'ClusterComputeResource',
+                                                   self._max_objects)
+        while retrieve_result:
+            if retrieve_result.objects:
+                for cluster in retrieve_result.objects:
+                    name = urllib.unquote(cluster.propSet[0].val)
+                    clusters[name] = cluster.obj
+            retrieve_result = self.continue_retrieval(retrieve_result)
+        return clusters
+
+    def get_cluster_refs(self, names):
+        """Get references to given clusters.
+
+        :param names: list of cluster names
+        :return: Dictionary of cluster names to references
+        """
+        clusters = self._get_all_clusters()
+        for name in names:
+            if name not in clusters:
+                LOG.error(_LE("Compute cluster: %s not found."), name)
+                raise vmdk_exceptions.ClusterNotFoundException(cluster=name)
+
+        return {name: clusters[name] for name in names}
+
+    def get_cluster_hosts(self, cluster):
+        """Get hosts in the given cluster.
+
+        :param cluster: cluster reference
+        :return: references to hosts in the cluster
+        """
+        hosts = self._session.invoke_api(vim_util,
+                                         'get_object_property',
+                                         self._session.vim,
+                                         cluster,
+                                         'host')
+
+        host_refs = []
+        if hosts and hosts.ManagedObjectReference:
+            host_refs.extend(hosts.ManagedObjectReference)
+
+        return host_refs
-- 
2.45.2
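Illustrative sketch (not part of the applied diff): 'vmware_cluster_name' is a MultiStrOpt, so it may be repeated in cinder.conf (one line per cluster), and do_setup() resolves every configured name to a cluster reference up front, failing fast on unknown names. The helper below only mimics that resolution step; resolve_cluster_refs, ClusterNotFound and the inline inventory are hypothetical stand-ins for VMwareVolumeOps.get_cluster_refs(), ClusterNotFoundException and the real ClusterComputeResource lookup.

class ClusterNotFound(Exception):
    """Stand-in for vmdk_exceptions.ClusterNotFoundException."""


def resolve_cluster_refs(configured_names, all_clusters):
    """Map configured cluster names to managed object references.

    :param configured_names: values of the vmware_cluster_name option
    :param all_clusters: dict of every vCenter cluster name to its reference
    :return: dict containing only the configured clusters
    """
    missing = [name for name in configured_names if name not in all_clusters]
    if missing:
        # Fail during driver setup rather than at volume-create time.
        raise ClusterNotFound("compute cluster(s) not found: %s" % missing)
    return {name: all_clusters[name] for name in configured_names}


if __name__ == '__main__':
    # Hypothetical inventory; cinder.conf would carry, for example:
    #   vmware_cluster_name = cls-1
    #   vmware_cluster_name = cls-2
    inventory = {"cls-1": "domain-c7", "cls-2": "domain-c9"}
    print(resolve_cluster_refs(["cls-1", "cls-2"], inventory))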
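A second sketch (again not part of the applied diff) shows the host narrowing that _select_datastore() now performs: a caller-supplied host still wins, otherwise the hosts of the configured clusters are collected, and an empty result raises instead of silently widening the search; only when no clusters are configured does the selector consider every host. NoValidHost, FakeVolumeOps and FakeSelector are hypothetical stand-ins for NoValidHostException, VMwareVolumeOps and hub.DatastoreSelector.

class NoValidHost(Exception):
    """Stand-in for vmdk_exceptions.NoValidHostException."""


def select_datastore(req, ds_sel, volumeops, clusters=None, host=None):
    if host:
        hosts = [host]          # e.g. relocation to an already-known host
    elif clusters:
        hosts = []
        for cluster in clusters:
            hosts.extend(volumeops.get_cluster_hosts(cluster))
        if not hosts:
            # Configured clusters exist but expose no usable ESX hosts.
            raise NoValidHost("no hosts in configured clusters")
    else:
        hosts = None            # no restriction; selector sees all hosts
    return ds_sel.select_datastore(req, hosts=hosts)


class FakeVolumeOps(object):
    def get_cluster_hosts(self, cluster):
        return {"cls-1": ["host-10"], "cls-2": ["host-11"]}.get(cluster, [])


class FakeSelector(object):
    def select_datastore(self, req, hosts=None):
        return ("host-10", "pool-1", "datastore-1") if hosts else ()


if __name__ == '__main__':
    print(select_datastore({"size_bytes": 1024}, FakeSelector(),
                           FakeVolumeOps(), clusters=["cls-1", "cls-2"]))

Raising before the selector runs keeps the selector's own "if not hosts" fallback (the datastore.py change above) for callers that genuinely pass no host constraint.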