From: Subramanian Neelakantan
Date: Wed, 11 Dec 2013 13:49:37 +0000 (+0530)
Subject: vmware: Storage policy based volume placement.
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=848ef0043f60795db680afe8f67b633459eaf52c;p=openstack-build%2Fcinder-build.git

vmware: Storage policy based volume placement.

This change adds a new feature to the VMware VMDK cinder driver. Users
can specify a vSphere storage policy in a volume-type. Creating a volume
with this volume-type associates the volume with the storage policy in
vSphere. The driver then places the volume only on a datastore that
satisfies the storage policy.

Implements: blueprint vmdk-storage-policy-volume-type
Change-Id: I84585e7d5fc8b28f8cfed98cb621b5b6ce9435e0
---
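The driver reads the extra-spec key vmware:storage_profile from the
volume's volume-type (see vmdk.py below). A minimal sketch of the
admin-side wiring, assuming python-cinderclient v1; the volume-type
name, the 'Gold' profile, and the credentials/endpoint are hypothetical:

    # Sketch: attach a vCenter storage policy to a Cinder volume type.
    # Assumes python-cinderclient; names and endpoint are illustrative.
    from cinderclient.v1 import client

    cinder = client.Client('admin', 'password', 'admin',
                           'http://keystone.example.com:5000/v2.0')
    vtype = cinder.volume_types.create('vmware-gold')
    # The vmdk driver looks up exactly this extra-spec key.
    vtype.set_keys({'vmware:storage_profile': 'Gold'})
    # Volumes of this type land only on datastores satisfying 'Gold'.
    cinder.volumes.create(1, volume_type='vmware-gold',
                          display_name='gold-vol')
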
diff --git a/cinder/tests/test_vmware_vmdk.py b/cinder/tests/test_vmware_vmdk.py
index 9ca3b1438..2083bb561 100644
--- a/cinder/tests/test_vmware_vmdk.py
+++ b/cinder/tests/test_vmware_vmdk.py
@@ -17,6 +17,7 @@
 Test suite for VMware VMDK driver.
 """
 
+import mock
 import mox
 
 from cinder import exception
@@ -215,7 +216,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         """Test get_volume_stats."""
         stats = self._driver.get_volume_stats()
         self.assertEqual(stats['vendor_name'], 'VMware')
-        self.assertEqual(stats['driver_version'], '1.1.0')
+        self.assertEqual(stats['driver_version'], self._driver.VERSION)
         self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
         self.assertEqual(stats['reserved_percentage'], 0)
         self.assertEqual(stats['total_capacity_gb'], 'unknown')
@@ -441,28 +442,36 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         m.UnsetStubs()
         m.VerifyAll()
 
-    def test_get_folder_ds_summary(self):
+    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
+                'session', new_callable=mock.PropertyMock)
+    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
+                'volumeops', new_callable=mock.PropertyMock)
+    def test_get_folder_ds_summary(self, volumeops, session):
         """Test _get_folder_ds_summary."""
-        m = self.mox
-        m.StubOutWithMock(self._driver.__class__, 'volumeops')
-        self._driver.volumeops = self._volumeops
-        size = 1
-        resource_pool = FakeMor('ResourcePool', 'my_rp')
-        datacenter = FakeMor('Datacenter', 'my_dc')
-        m.StubOutWithMock(self._volumeops, 'get_dc')
-        self._volumeops.get_dc(resource_pool).AndReturn(datacenter)
-        m.StubOutWithMock(self._driver, '_get_volume_group_folder')
-        folder = FakeMor('Folder', 'my_fol')
-        self._driver._get_volume_group_folder(datacenter).AndReturn(folder)
-        m.StubOutWithMock(self._driver, '_select_datastore_summary')
-        size = 1
-        datastores = [FakeMor('Datastore', 'my_ds')]
-        self._driver._select_datastore_summary(size * units.GiB, datastores)
-
-        m.ReplayAll()
-        self._driver._get_folder_ds_summary(size, resource_pool, datastores)
-        m.UnsetStubs()
-        m.VerifyAll()
+        volumeops = volumeops.return_value
+        driver = self._driver
+        volume = {'size': 10, 'volume_type_id': 'fake_type'}
+        rp = mock.sentinel.resource_pool
+        dss = mock.sentinel.datastores
+        # patch method calls from _get_folder_ds_summary
+        volumeops.get_dc.return_value = mock.sentinel.dc
+        volumeops.get_vmfolder.return_value = mock.sentinel.folder
+        driver._get_storage_profile = mock.MagicMock()
+        driver._select_datastore_summary = mock.MagicMock()
+        driver._select_datastore_summary.return_value = mock.sentinel.summary
+        # call _get_folder_ds_summary
+        (folder, datastore_summary) = driver._get_folder_ds_summary(volume,
+                                                                    rp, dss)
+        # verify returned values and calls made
+        self.assertEqual(mock.sentinel.folder, folder,
+                         "Folder returned is wrong.")
+        self.assertEqual(mock.sentinel.summary, datastore_summary,
+                         "Datastore summary returned is wrong.")
+        volumeops.get_dc.assert_called_once_with(rp)
+        volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
+        driver._get_storage_profile.assert_called_once_with(volume)
+        size = volume['size'] * units.GiB
+        driver._select_datastore_summary.assert_called_once_with(size, dss)
 
     def test_get_disk_type(self):
         """Test _get_disk_type."""
@@ -494,7 +503,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
         folder = FakeMor('Folder', 'my_fol')
         summary = FakeDatastoreSummary(1, 1)
-        self._driver._get_folder_ds_summary(volume['size'], resource_pool,
+        self._driver._get_folder_ds_summary(volume, resource_pool,
                                             datastores).AndReturn((folder,
                                                                    summary))
 
         backing = FakeMor('VirtualMachine', 'my_back')
@@ -503,6 +512,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
                                        volume['size'] * units.MiB,
                                        mox.IgnoreArg(), folder,
                                        resource_pool, host,
+                                       mox.IgnoreArg(),
                                        mox.IgnoreArg()).AndReturn(backing)
 
         m.ReplayAll()
@@ -877,17 +887,17 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         (host, rp, folder, summary) = (FakeObject(), FakeObject(),
                                        FakeObject(), FakeObject())
         summary.name = "datastore-1"
-        m.StubOutWithMock(self._driver, '_select_ds_for_volume')
-        self._driver._select_ds_for_volume(size_gb).AndReturn((host, rp,
-                                                               folder,
-                                                               summary))
-        # _get_disk_type call
         vol_name = 'volume name'
         volume = FakeObject()
         volume['name'] = vol_name
         volume['size'] = size_gb
         volume['volume_type_id'] = None  # _get_disk_type will return 'thin'
         disk_type = 'thin'
+        m.StubOutWithMock(self._driver, '_select_ds_for_volume')
+        self._driver._select_ds_for_volume(volume).AndReturn((host, rp,
+                                                              folder,
+                                                              summary))
+        # _get_create_spec call
         m.StubOutWithMock(self._volumeops, '_get_create_spec')
         self._volumeops._get_create_spec(vol_name, 0, disk_type,
@@ -1043,8 +1053,11 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
 class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
     """Test class for VMwareVcVmdkDriver."""
 
+    PBM_WSDL = '/fake/wsdl/path'
+
     def setUp(self):
         super(VMwareVcVmdkDriverTestCase, self).setUp()
+        self.flags(vmware_pbm_wsdl=self.PBM_WSDL)
         self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config)
 
     def test_init_conn_with_instance_and_backing(self):
@@ -1124,7 +1137,7 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
         folder = FakeMor('Folder', 'my_fol')
         summary = FakeDatastoreSummary(1, 1, datastore1)
         size = 1
-        self._driver._get_folder_ds_summary(size, resource_pool,
+        self._driver._get_folder_ds_summary(volume, resource_pool,
                                             [datastore1]).AndReturn((folder,
                                                                      summary))
 
         m.StubOutWithMock(self._volumeops, 'relocate_backing')
@@ -1165,23 +1178,15 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
         m = self.mox
         m.StubOutWithMock(self._driver.__class__, 'volumeops')
         self._driver.volumeops = self._volumeops
-        m.StubOutWithMock(self._volumeops, 'get_host')
         backing = FakeMor('VirtualMachine', 'my_vm')
-        host = FakeMor('HostSystem', 'my_host')
-        self._volumeops.get_host(backing).AndReturn(host)
-        m.StubOutWithMock(self._volumeops, 'get_dss_rp')
         datastore = FakeMor('Datastore', 'my_ds')
-        datastores = [datastore]
-        resource_pool = FakeMor('ResourcePool', 'my_rp')
-        self._volumeops.get_dss_rp(host).AndReturn((datastores,
-                                                    resource_pool))
-        m.StubOutWithMock(self._driver, '_select_datastore_summary')
+        m.StubOutWithMock(self._driver, '_select_ds_for_volume')
        volume = FakeObject()
         volume['name'] = 'volume_name'
         volume['size'] = 1
         summary = FakeDatastoreSummary(1, 1, datastore=datastore)
-        self._driver._select_datastore_summary(volume['size'] * units.GiB,
-                                               datastores).AndReturn(summary)
+        self._driver._select_ds_for_volume(volume).AndReturn((_, _, _,
+                                                              summary))
         m.StubOutWithMock(self._volumeops, 'clone_backing')
         self._volumeops.clone_backing(volume['name'], backing,
                                       mox.IgnoreArg(),
@@ -1290,3 +1295,111 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                           self._driver.create_cloned_volume, volume, src_vref)
         m.UnsetStubs()
         m.VerifyAll()
+
+    @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
+    def test_get_storage_profile(self, get_volume_type_extra_specs):
+        """Test vmdk _get_storage_profile."""
+
+        # Test volume with no type id returns None
+        volume = FakeObject()
+        volume['volume_type_id'] = None
+        sp = self._driver._get_storage_profile(volume)
+        self.assertEqual(None, sp, "Without a volume_type_id no storage "
+                         "profile should be returned.")
+
+        # Test volume with type id calls extra specs
+        fake_id = 'fake_volume_id'
+        volume['volume_type_id'] = fake_id
+        self._driver._get_storage_profile(volume)
+        spec_key = 'vmware:storage_profile'
+        get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
+
+    @mock.patch('cinder.volume.drivers.vmware.vim_util.'
+                'convert_datastores_to_hubs')
+    @mock.patch('cinder.volume.drivers.vmware.vim_util.'
+                'convert_hubs_to_datastores')
+    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+                'session', new_callable=mock.PropertyMock)
+    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+                'volumeops', new_callable=mock.PropertyMock)
+    def test_filter_ds_by_profile(self, volumeops, session, hubs_to_ds,
+                                  ds_to_hubs):
+        """Test vmdk _filter_ds_by_profile() method."""
+
+        volumeops = volumeops.return_value
+        session = session.return_value
+
+        # Test with no profile id
+        datastores = [mock.sentinel.ds1, mock.sentinel.ds2]
+        profile = 'fake_profile'
+        volumeops.retrieve_profile_id.return_value = None
+        self.assertRaises(error_util.VimException,
+                          self._driver._filter_ds_by_profile,
+                          datastores, profile)
+        volumeops.retrieve_profile_id.assert_called_once_with(profile)
+
+        # Test with a fake profile id
+        profileId = 'fake_profile_id'
+        filtered_dss = [mock.sentinel.ds1]
+        # patch method calls from _filter_ds_by_profile
+        volumeops.retrieve_profile_id.return_value = profileId
+        pbm_cf = mock.sentinel.pbm_cf
+        session.pbm.client.factory = pbm_cf
+        hubs = [mock.sentinel.hub1, mock.sentinel.hub2]
+        ds_to_hubs.return_value = hubs
+        volumeops.filter_matching_hubs.return_value = mock.sentinel.hubs
+        hubs_to_ds.return_value = filtered_dss
+        # call _filter_ds_by_profile with a fake profile
+        actual_dss = self._driver._filter_ds_by_profile(datastores, profile)
+        # verify return value and called methods
+        self.assertEqual(filtered_dss, actual_dss,
+                         "Wrong filtered datastores returned.")
+        ds_to_hubs.assert_called_once_with(pbm_cf, datastores)
+        volumeops.filter_matching_hubs.assert_called_once_with(hubs,
+                                                               profileId)
+        hubs_to_ds.assert_called_once_with(mock.sentinel.hubs, datastores)
+
+    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+                'session', new_callable=mock.PropertyMock)
+    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+                'volumeops', new_callable=mock.PropertyMock)
+    def test_get_folder_ds_summary(self, volumeops, session):
+        """Test _get_folder_ds_summary."""
+        volumeops = volumeops.return_value
+        driver = self._driver
+        driver._storage_policy_enabled = True
+        volume = {'size': 10, 'volume_type_id': 'fake_type'}
+        rp = mock.sentinel.resource_pool
+        dss = [mock.sentinel.datastore1, mock.sentinel.datastore2]
+        filtered_dss = [mock.sentinel.datastore1]
+        profile = mock.sentinel.profile
+
+        def filter_ds(datastores, storage_profile):
+            return filtered_dss
+
+        # patch method calls from _get_folder_ds_summary
+        volumeops.get_dc.return_value = mock.sentinel.dc
+        volumeops.get_vmfolder.return_value = mock.sentinel.vmfolder
+        volumeops.create_folder.return_value = mock.sentinel.folder
+        driver._get_storage_profile = mock.MagicMock()
+        driver._get_storage_profile.return_value = profile
+        driver._filter_ds_by_profile = mock.MagicMock(side_effect=filter_ds)
+        driver._select_datastore_summary = mock.MagicMock()
+        driver._select_datastore_summary.return_value = mock.sentinel.summary
+        # call _get_folder_ds_summary
+        (folder, datastore_summary) = driver._get_folder_ds_summary(volume,
+                                                                    rp, dss)
+        # verify returned values and calls made
+        self.assertEqual(mock.sentinel.folder, folder,
+                         "Folder returned is wrong.")
+        self.assertEqual(mock.sentinel.summary, datastore_summary,
+                         "Datastore summary returned is wrong.")
+        volumeops.get_dc.assert_called_once_with(rp)
+        volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
+        volumeops.create_folder.assert_called_once_with(mock.sentinel.vmfolder,
+                                                        self.VOLUME_FOLDER)
+        driver._get_storage_profile.assert_called_once_with(volume)
+        driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
+        size = volume['size'] * units.GiB
+        driver._select_datastore_summary.assert_called_once_with(size,
+                                                                 filtered_dss)
diff --git a/cinder/tests/test_vmware_volumeops.py b/cinder/tests/test_vmware_volumeops.py
index 29925c6f6..1c1b7dc2e 100644
--- a/cinder/tests/test_vmware_volumeops.py
+++ b/cinder/tests/test_vmware_volumeops.py
@@ -442,7 +442,7 @@ class VolumeOpsTestCase(test.TestCase):
                                             resource_pool, host, ds_name)
         self.assertEqual(mock.sentinel.result, ret)
         get_create_spec.assert_called_once_with(name, size_kb, disk_type,
-                                                ds_name)
+                                                ds_name, None)
         self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                         'CreateVM_Task',
                                                         folder,
diff --git a/cinder/volume/drivers/vmware/api.py b/cinder/volume/drivers/vmware/api.py
index e7ca916b0..ecb436d06 100644
--- a/cinder/volume/drivers/vmware/api.py
+++ b/cinder/volume/drivers/vmware/api.py
@@ -1,5 +1,3 @@
-# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2013 VMware, Inc.
 # All Rights Reserved.
 #
@@ -23,6 +21,7 @@ Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls.
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import pbm
 from cinder.volume.drivers.vmware import vim
 from cinder.volume.drivers.vmware import vim_util
 
@@ -98,7 +97,7 @@ class VMwareAPISession(object):
     @Retry(exceptions=(Exception))
     def __init__(self, server_ip, server_username, server_password,
                  api_retry_count, task_poll_interval, scheme='https',
-                 create_session=True, wsdl_loc=None):
+                 create_session=True, wsdl_loc=None, pbm_wsdl=None):
         """Constructs session object.
 
         :param server_ip: IP address of ESX/VC server
@@ -111,8 +110,10 @@ class VMwareAPISession(object):
         :param scheme: http or https protocol
         :param create_session: Boolean whether to set up connection at the
                                time of instance creation
-        :param wsdl_loc: WSDL file location for invoking SOAP calls on server
-                         using suds
+        :param wsdl_loc: VIM WSDL file location for invoking SOAP calls on
+                         server using suds
+        :param pbm_wsdl: PBM WSDL file location. If set to None the storage
+                         policy related functionality will be disabled.
         """
         self._server_ip = server_ip
         self._server_username = server_username
@@ -123,6 +124,8 @@ class VMwareAPISession(object):
         self._scheme = scheme
         self._session_id = None
         self._vim = None
+        self._pbm_wsdl = pbm_wsdl
+        self._pbm = None
         if create_session:
             self.create_session()
 
@@ -133,6 +136,14 @@ class VMwareAPISession(object):
                                 wsdl_loc=self._wsdl_loc)
         return self._vim
 
+    @property
+    def pbm(self):
+        if not self._pbm and self._pbm_wsdl:
+            self._pbm = pbm.PBMClient(self.vim, self._pbm_wsdl,
+                                      protocol=self._scheme,
+                                      host=self._server_ip)
+        return self._pbm
+
     def create_session(self):
         """Establish session with the server."""
         # Login and setup the session with the server for making
@@ -157,15 +168,23 @@ class VMwareAPISession(object):
                 LOG.exception(_("Error while terminating session: %s.") %
                               excep)
         self._session_id = session.key
+        if self.pbm:
+            self.pbm.set_cookie()
         LOG.info(_("Successfully established connection to the server."))
 
     def __del__(self):
-        """Logs-out the session."""
+        """Logs out of the vim and pbm sessions."""
         try:
             self.vim.Logout(self.vim.service_content.sessionManager)
         except Exception as excep:
-            LOG.exception(_("Error while logging out the user: %s.") %
+            LOG.exception(_("Error while logging out from vim session: %s."),
                           excep)
+        if self._pbm:
+            try:
+                self.pbm.Logout(self.pbm.service_content.sessionManager)
+            except Exception as excep:
+                LOG.exception(_("Error while logging out from pbm session: "
+                                "%s."), excep)
 
     def invoke_api(self, module, method, *args, **kwargs):
         """Wrapper method for invoking APIs.
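create_session() above re-sends the authenticated vim session cookie to
the PBM endpoint via PBMClient.set_cookie(). A self-contained sketch of
that cookie lookup, using the standard library's http.cookiejar in place
of the suds transport's jar; the host name and cookie value are
illustrative:

    # Sketch of PBMClient._get_vc_session_cookie(): find the vSphere
    # session cookie in an authenticated client's cookie jar.
    from http.cookiejar import Cookie, CookieJar

    def make_cookie(name, value):
        # Bare-bones cookie, the way a suds/urllib transport would
        # store it after a successful Login call.
        return Cookie(0, name, value, None, False, 'vc.example.com',
                      False, False, '/', False, False, None, False,
                      None, None, {})

    def get_vc_session_cookie(jar):
        # Scan the jar for the vSphere session cookie,
        # case-insensitively, as the patch does.
        for c in jar:
            if c.name.lower() == 'vmware_soap_session':
                return c.value

    jar = CookieJar()
    jar.set_cookie(make_cookie('vmware_soap_session', '"52ab3f..."'))
    print(get_vc_session_cookie(jar))  # -> "52ab3f..."
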
""" self._server_ip = server_ip self._server_username = server_username @@ -123,6 +124,8 @@ class VMwareAPISession(object): self._scheme = scheme self._session_id = None self._vim = None + self._pbm_wsdl = pbm_wsdl + self._pbm = None if create_session: self.create_session() @@ -133,6 +136,14 @@ class VMwareAPISession(object): wsdl_loc=self._wsdl_loc) return self._vim + @property + def pbm(self): + if not self._pbm and self._pbm_wsdl: + self._pbm = pbm.PBMClient(self.vim, self._pbm_wsdl, + protocol=self._scheme, + host=self._server_ip) + return self._pbm + def create_session(self): """Establish session with the server.""" # Login and setup the session with the server for making @@ -157,15 +168,23 @@ class VMwareAPISession(object): LOG.exception(_("Error while terminating session: %s.") % excep) self._session_id = session.key + if self.pbm: + self.pbm.set_cookie() LOG.info(_("Successfully established connection to the server.")) def __del__(self): - """Logs-out the session.""" + """Logs-out the sessions.""" try: self.vim.Logout(self.vim.service_content.sessionManager) except Exception as excep: - LOG.exception(_("Error while logging out the user: %s.") % + LOG.exception(_("Error while logging out from vim session: %s."), excep) + if self._pbm: + try: + self.pbm.Logout(self.pbm.service_content.sessionManager) + except Exception as excep: + LOG.exception(_("Error while logging out from pbm session: " + "%s."), excep) def invoke_api(self, module, method, *args, **kwargs): """Wrapper method for invoking APIs. diff --git a/cinder/volume/drivers/vmware/pbm.py b/cinder/volume/drivers/vmware/pbm.py new file mode 100644 index 000000000..1baa69b8e --- /dev/null +++ b/cinder/volume/drivers/vmware/pbm.py @@ -0,0 +1,96 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Class for making VMware PBM SOAP calls. + +This is used for storage policy based placement of volumes. Read more about +it here: +http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vspsdk.apiref.doc/\ +right-pane.html +""" + +import suds +import suds.sax.element as element + +from cinder.openstack.common import log as logging +from cinder.volume.drivers.vmware import vim as vim_module +from cinder.volume.drivers.vmware import vim_util + +LOG = logging.getLogger(__name__) +SERVICE_INSTANCE = 'ServiceInstance' +SERVICE_TYPE = 'PbmServiceInstance' + + +class PBMClient(vim_module.Vim): + """Sets up a client to interact with the vSphere PBM APIs. + + This client piggy backs on Vim object's authenticated cookie to invoke + PBM API calls. + + Note that this class needs the PBM wsdl file in order to make SOAP API + calls. This wsdl file is included in the VMware Storage Policy SDK. + A user of this feature needs to install this SDK on the Cinder volume + nodes and configure the path in the cinder.conf file. + """ + + def __init__(self, vimSession, pbm_wsdl, protocol='https', + host='localhost'): + """Constructs a PBM client object. 
+ + :param vimSession: an authenticated api.VMwareAPISession object + :param pbm_wsdl: URL path to where pbmService.wsdl file is located. + :param protocol: http or https + :param host: Server IPAddress[:port] or Hostname[:port] + """ + self._vimSession = vimSession + self._url = vim_util.get_soap_url(protocol, host, 'pbm') + # create the pbm client + self._client = suds.client.Client(pbm_wsdl, location=self._url) + PBMClient._copy_client_cookie(self._vimSession, self._client) + # Get the PBM service content + si_moref = vim_module.get_moref(SERVICE_INSTANCE, SERVICE_TYPE) + self._sc = self._client.service.PbmRetrieveServiceContent(si_moref) + + @staticmethod + def _copy_client_cookie(vimSession, pbmClient): + """Copy the vim session cookie to pbm client soap header. + + :param vimSession: an vim session authenticated with VC/ESX + :param pbmClient: a PBMClient object to set the session cookie + """ + vcSessionCookie = PBMClient._get_vc_session_cookie(vimSession) + vcc = element.Element('vcSessionCookie').setText(vcSessionCookie) + pbmClient.set_options(soapheaders=vcc) + + @staticmethod + def _get_vc_session_cookie(vimSession): + """Look for vmware_soap_session cookie in vimSession.""" + cookies = vimSession.client.options.transport.cookiejar + for c in cookies: + if c.name.lower() == 'vmware_soap_session': + return c.value + + @property + def service_content(self): + return self._sc + + @property + def client(self): + return self._client + + def set_cookie(self): + """Set the authenticated vim session cookie in this pbm client.""" + PBMClient._copy_client_cookie(self._vimSession, self.client) diff --git a/cinder/volume/drivers/vmware/vim.py b/cinder/volume/drivers/vmware/vim.py index 0d02543e0..3bad426cc 100644 --- a/cinder/volume/drivers/vmware/vim.py +++ b/cinder/volume/drivers/vmware/vim.py @@ -18,9 +18,11 @@ Classes for making VMware VI SOAP calls. """ import httplib + import suds from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim_util RESP_NOT_XML_ERROR = "Response is 'text/html', not 'text/xml'" CONN_ABORT_ERROR = 'Software caused connection abort' @@ -80,7 +82,7 @@ class Vim(object): self._host_name = host if not wsdl_loc: wsdl_loc = Vim._get_wsdl_loc(protocol, host) - soap_url = Vim._get_soap_url(protocol, host) + soap_url = vim_util.get_soap_url(protocol, host) self._client = suds.client.Client(wsdl_loc, location=soap_url, plugins=[VIMMessagePlugin()]) self._service_content = self.RetrieveServiceContent('ServiceInstance') @@ -93,17 +95,7 @@ class Vim(object): :param host_name: ESX/VC server host name :return: Default WSDL file location hosted at the server """ - return '%s://%s/sdk/vimService.wsdl' % (protocol, host_name) - - @staticmethod - def _get_soap_url(protocol, host_name): - """Return URL to SOAP services for ESX/VC server. - - :param protocol: https or http - :param host_name: ESX/VC server host name - :return: URL to SOAP services for ESX/VC server - """ - return '%s://%s/sdk' % (protocol, host_name) + return vim_util.get_soap_url(protocol, host_name) + '/vimService.wsdl' @property def service_content(self): diff --git a/cinder/volume/drivers/vmware/vim_util.py b/cinder/volume/drivers/vmware/vim_util.py index 06e310058..948773243 100644 --- a/cinder/volume/drivers/vmware/vim_util.py +++ b/cinder/volume/drivers/vmware/vim_util.py @@ -17,6 +17,21 @@ The VMware API utility module. """ +import netaddr + + +def get_soap_url(protocol, host, path='sdk'): + """Return URL to SOAP services for ESX/VC server. 
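The new vim_util.get_soap_url() above brackets IPv6 literals as RFC 3986
requires. A stdlib-only sketch of the same check, with socket.inet_pton
standing in for netaddr.valid_ipv6 so it runs without extra packages:

    # Sketch of get_soap_url() behavior; IPv6 hosts must be bracketed.
    import socket

    def get_soap_url(protocol, host, path='sdk'):
        try:
            socket.inet_pton(socket.AF_INET6, host)  # IPv6 literal?
            return '%s://[%s]/%s' % (protocol, host, path)
        except (socket.error, OSError):
            return '%s://%s/%s' % (protocol, host, path)

    print(get_soap_url('https', '10.20.30.40'))     # https://10.20.30.40/sdk
    print(get_soap_url('https', 'fd00::1', 'pbm'))  # https://[fd00::1]/pbm
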
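The two hub converters above round-trip between datastore morefs and
PbmPlacementHub objects; only the moref value survives the trip, so the
reverse direction filters the original datastore list by value. A
runnable sketch, with stand-ins for the suds factory and morefs:

    # Self-contained sketch of the datastore <-> hub round trip.
    from types import SimpleNamespace

    def convert_datastores_to_hubs(factory, datastores):
        hubs = []
        for ds in datastores:
            hub = factory.create('ns0:PbmPlacementHub')
            hub.hubId = ds.value
            hub.hubType = 'Datastore'
            hubs.append(hub)
        return hubs

    def convert_hubs_to_datastores(hubs, datastores):
        hub_ids = [hub.hubId for hub in hubs]
        return [ds for ds in datastores if ds.value in hub_ids]

    class FakeFactory(object):
        # stands in for session.pbm.client.factory
        def create(self, type_name):
            return SimpleNamespace()

    dss = [SimpleNamespace(value='datastore-1'),
           SimpleNamespace(value='datastore-2')]
    hubs = convert_datastores_to_hubs(FakeFactory(), dss)
    # pretend PbmQueryMatchingHub kept only the first hub
    matching = hubs[:1]
    print([ds.value for ds in convert_hubs_to_datastores(matching, dss)])
    # -> ['datastore-1']
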
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index c670c675a..176280238 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -31,6 +31,7 @@ from cinder.volume import driver
 from cinder.volume.drivers.vmware import api
 from cinder.volume.drivers.vmware import error_util
 from cinder.volume.drivers.vmware import vim
+from cinder.volume.drivers.vmware import vim_util
 from cinder.volume.drivers.vmware import vmware_images
 from cinder.volume.drivers.vmware import volumeops
 from cinder.volume import volume_types
@@ -79,14 +80,19 @@ vmdk_opts = [
                'Query results will be obtained in batches from the '
                'server and not in one shot. Server may still limit the '
                'count to something less than the configured value.'),
+    cfg.StrOpt('vmware_pbm_wsdl',
+               help='PBM service WSDL file location URL. '
+                    'e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl. '
+                    'Not setting this will disable storage policy based '
+                    'placement of volumes.'),
 ]
 
 CONF = cfg.CONF
 CONF.register_opts(vmdk_opts)
 
 
-def _get_volume_type_extra_spec(type_id, spec_key, possible_values,
-                                default_value):
+def _get_volume_type_extra_spec(type_id, spec_key, possible_values=None,
+                                default_value=None):
     """Get extra spec value.
 
     If the spec value is not present in the input possible_values, then
@@ -99,30 +105,39 @@
 
     :param type_id: Volume type ID
     :param spec_key: Extra spec key
-    :param possible_values: Permitted values for the extra spec
+    :param possible_values: Permitted values for the extra spec if known
     :param default_value: Default value for the extra spec in case of an
                           invalid value or if the entry does not exist
     :return: extra spec value
     """
-    if type_id:
-        spec_key = ('vmware:%s') % spec_key
-        spec_value = volume_types.get_volume_type_extra_specs(type_id,
-                                                              spec_key)
-        if spec_value in possible_values:
-            LOG.debug(_("Returning spec value %s") % spec_value)
-            return spec_value
+    if not type_id:
+        return default_value
 
-        LOG.debug(_("Invalid spec value: %s specified.") % spec_value)
+    spec_key = ('vmware:%s') % spec_key
+    spec_value = volume_types.get_volume_type_extra_specs(type_id,
+                                                          spec_key)
+    if not spec_value:
+        LOG.debug(_("Returning default spec value: %s.") % default_value)
+        return default_value
 
-    # Default we return thin disk type
-    LOG.debug(_("Returning default spec value: %s.") % default_value)
-    return default_value
+    if possible_values is None:
+        return spec_value
+
+    if spec_value in possible_values:
+        LOG.debug(_("Returning spec value %s") % spec_value)
+        return spec_value
+
+    LOG.debug(_("Invalid spec value: %s specified.") % spec_value)
+    return default_value
 
 
 class VMwareEsxVmdkDriver(driver.VolumeDriver):
     """Manage volumes on VMware ESX server."""
 
-    VERSION = '1.1.0'
+    # 1.0 - initial version of driver
+    # 1.1.0 - selection of datastore based on number of host mounts
+    # 1.2.0 - storage profile (volume-type) based placement of volumes
+    VERSION = '1.2.0'
 
     def __init__(self, *args, **kwargs):
         super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
@@ -130,6 +145,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         self._session = None
         self._stats = None
         self._volumeops = None
+        # No storage policy based placement possible when connecting
+        # directly to ESX
+        self._storage_policy_enabled = False
 
     @property
     def session(self):
@@ -295,19 +313,68 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                   {'datastore': best_summary, 'host_count': max_host_count})
         return best_summary
 
-    def _get_folder_ds_summary(self, size_gb, resource_pool, datastores):
+    def _get_storage_profile(self, volume):
+        """Get storage profile associated with this volume's volume_type.
+
+        :param volume: volume whose storage profile should be queried
+        :return: string value of the storage profile if the volume type is
+                 associated with one, None otherwise
+        """
+        type_id = volume['volume_type_id']
+        if not type_id:
+            return
+        return _get_volume_type_extra_spec(type_id, 'storage_profile')
+
+    def _filter_ds_by_profile(self, datastores, storage_profile):
+        """Filter out datastores that do not match the given storage profile.
+
+        :param datastores: list of candidate datastores
+        :param storage_profile: storage profile name required to be satisfied
+        :return: subset of datastores that match storage_profile, or empty
+                 list if none of the datastores match
+        """
+        LOG.debug(_("Filter datastores matching storage profile %(profile)s: "
+                    "%(dss)s."),
+                  {'profile': storage_profile, 'dss': datastores})
+        profileId = self.volumeops.retrieve_profile_id(storage_profile)
+        if not profileId:
+            msg = _("No such storage profile '%s' is defined in vCenter.")
+            LOG.error(msg, storage_profile)
+            raise error_util.VimException(msg % storage_profile)
+        pbm_cf = self.session.pbm.client.factory
+        hubs = vim_util.convert_datastores_to_hubs(pbm_cf, datastores)
+        filtered_hubs = self.volumeops.filter_matching_hubs(hubs, profileId)
+        return vim_util.convert_hubs_to_datastores(filtered_hubs, datastores)
+
+    def _get_folder_ds_summary(self, volume, resource_pool, datastores):
         """Get folder and best datastore summary where volume can be placed.
 
-        :param size_gb: Size of the volume in GB
+        :param volume: volume to place into one of the datastores
         :param resource_pool: Resource pool reference
         :param datastores: Datastores from which a choice is to be made
                            for the volume
         :return: Folder and best datastore summary where volume can be
-                 placed on
+                 placed on.
         """
         datacenter = self.volumeops.get_dc(resource_pool)
         folder = self._get_volume_group_folder(datacenter)
-        size_bytes = size_gb * units.GiB
+        storage_profile = self._get_storage_profile(volume)
+        if self._storage_policy_enabled and storage_profile:
+            LOG.debug(_("Storage profile required for this volume: %s."),
+                      storage_profile)
+            datastores = self._filter_ds_by_profile(datastores,
+                                                    storage_profile)
+            if not datastores:
+                msg = _("Aborting since none of the datastores match the "
+                        "given storage profile %s.")
+                LOG.error(msg, storage_profile)
+                raise error_util.VimException(msg % storage_profile)
+        elif storage_profile:
+            LOG.warn(_("Ignoring storage profile %s requirement for this "
+                       "volume since policy based placement is "
+                       "disabled."), storage_profile)
+
+        size_bytes = volume['size'] * units.GiB
         datastore_summary = self._select_datastore_summary(size_bytes,
                                                            datastores)
         return (folder, datastore_summary)
@@ -335,22 +402,29 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         # Get datastores and resource pool of the host
         (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
         # Pick a folder and datastore to create the volume backing on
-        (folder, summary) = self._get_folder_ds_summary(volume['size'],
+        (folder, summary) = self._get_folder_ds_summary(volume,
                                                         resource_pool,
                                                         datastores)
         disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
         size_kb = volume['size'] * units.MiB
+        storage_profile = self._get_storage_profile(volume)
+        profileId = None
+        if self._storage_policy_enabled and storage_profile:
+            profile = self.volumeops.retrieve_profile_id(storage_profile)
+            if profile:
+                profileId = profile.uniqueId
         return self.volumeops.create_backing(volume['name'],
                                              size_kb,
                                              disk_type, folder,
                                              resource_pool,
                                              host,
-                                             summary.name)
+                                             summary.name,
+                                             profileId)
 
-    def _relocate_backing(self, size_gb, backing, host):
+    def _relocate_backing(self, volume, backing, host):
         pass
 
-    def _select_ds_for_volume(self, size_gb):
+    def _select_ds_for_volume(self, volume):
         """Select datastore that can accommodate a volume of given size.
 
         Returns the selected datastore summary along with a compute host and
@@ -367,7 +441,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             host = host.obj
             try:
                 (dss, rp) = self.volumeops.get_dss_rp(host)
-                (folder, summary) = self._get_folder_ds_summary(size_gb,
+                (folder, summary) = self._get_folder_ds_summary(volume,
                                                                 rp, dss)
                 selected_host = host
                 break
@@ -375,7 +449,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                 LOG.warn(_("Unable to find suitable datastore for volume "
                            "of size: %(vol)s GB under host: %(host)s. "
                            "More details: %(excep)s") %
-                         {'vol': size_gb,
+                         {'vol': volume['size'],
                           'host': host.obj, 'excep': excep})
         if selected_host:
             self.volumeops.cancel_retrieval(retrv_result)
@@ -383,7 +457,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             retrv_result = self.volumeops.continue_retrieval(retrv_result)
 
         msg = _("Unable to find host to accommodate a disk of size: %s "
-                "in the inventory.") % size_gb
+                "in the inventory.") % volume['size']
         LOG.error(msg)
         raise error_util.VimException(msg)
@@ -450,7 +524,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                 backing = self._create_backing(volume, host)
             else:
                 # Relocate volume is necessary
-                self._relocate_backing(volume['size'], backing, host)
+                self._relocate_backing(volume, backing, host)
         else:
             # The instance does not exist
             LOG.debug(_("The instance for which initialize connection "
@@ -740,12 +814,12 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         try:
             # find host in which to create the volume
-            size_gb = volume['size']
-            (host, rp, folder, summary) = self._select_ds_for_volume(size_gb)
+            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
         except error_util.VimException as excep:
             LOG.exception(_("Exception in _select_ds_for_volume: %s.") %
                           excep)
             raise excep
+        size_gb = volume['size']
         LOG.debug(_("Selected datastore %(ds)s for new volume of size "
                     "%(size)s GB.") % {'ds': summary.name, 'size': size_gb})
@@ -876,6 +950,29 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
 class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
     """Manage volumes on VMware VC server."""
 
+    def __init__(self, *args, **kwargs):
+        super(VMwareVcVmdkDriver, self).__init__(*args, **kwargs)
+        self._session = None
+
+    @property
+    def session(self):
+        if not self._session:
+            ip = self.configuration.vmware_host_ip
+            username = self.configuration.vmware_host_username
+            password = self.configuration.vmware_host_password
+            api_retry_count = self.configuration.vmware_api_retry_count
+            task_poll_interval = self.configuration.vmware_task_poll_interval
+            wsdl_loc = self.configuration.safe_get('vmware_wsdl_location')
+            pbm_wsdl = self.configuration.vmware_pbm_wsdl
+            self._session = api.VMwareAPISession(ip, username,
+                                                 password, api_retry_count,
+                                                 task_poll_interval,
+                                                 wsdl_loc=wsdl_loc,
+                                                 pbm_wsdl=pbm_wsdl)
+            if pbm_wsdl:
+                self._storage_policy_enabled = True
+        return self._session
+
     def _get_volume_group_folder(self, datacenter):
         """Get volume group folder.
@@ -890,13 +987,13 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         volume_folder = self.configuration.vmware_volume_folder
         return self.volumeops.create_folder(vm_folder, volume_folder)
 
-    def _relocate_backing(self, size_gb, backing, host):
+    def _relocate_backing(self, volume, backing, host):
         """Relocate volume backing under host and move to volume_group folder.
 
         If the volume backing is on a datastore that is visible to the host,
         then need not do any operation.
 
-        :param size_gb: Size of the volume in GB
+        :param volume: volume to be relocated
         :param backing: Reference to the backing
         :param host: Reference to the host
         """
@@ -917,7 +1014,8 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         # host managing the instance. We relocate the volume's backing.
 
         # Pick a folder and datastore to relocate volume backing to
-        (folder, summary) = self._get_folder_ds_summary(size_gb, resource_pool,
+        (folder, summary) = self._get_folder_ds_summary(volume,
+                                                        resource_pool,
                                                         datastores)
         LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
                  {'backing': backing, 'ds': summary, 'rp': resource_pool})
@@ -950,12 +1048,9 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         """
         datastore = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
-            # Pick a datastore where to create the full clone under same host
-            host = self.volumeops.get_host(backing)
-            (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
-            size_bytes = volume['size'] * units.GiB
-            datastore = self._select_datastore_summary(size_bytes,
-                                                       datastores).datastore
+            # Pick a datastore in which to create the full clone under any host
+            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            datastore = summary.datastore
         clone = self.volumeops.clone_backing(volume['name'], backing,
                                              snapshot, clone_type, datastore)
         LOG.info(_("Successfully created clone: %s.") % clone)
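The placement logic that _get_folder_ds_summary() now implements: filter
by profile when policy based placement is enabled, fail when nothing
matches, warn and fall through otherwise. A distilled, runnable sketch
with plain callables standing in for the driver and vSphere calls; the
error type and all names here are stand-ins, not cinder APIs:

    # Sketch of the profile-aware datastore selection control flow.
    def pick_datastore(volume, datastores, policy_enabled,
                       get_profile, filter_by_profile, select_summary):
        profile = get_profile(volume)  # extra spec vmware:storage_profile
        if policy_enabled and profile:
            datastores = filter_by_profile(datastores, profile)
            if not datastores:
                raise RuntimeError("no datastore matches profile %r"
                                   % profile)
        elif profile:
            print("warning: ignoring profile %r, placement disabled"
                  % profile)
        return select_summary(volume['size'] * 1024 ** 3, datastores)

    vol = {'size': 1, 'volume_type_id': 'gold-type'}
    summary = pick_datastore(
        vol, ['ds1', 'ds2'], True,
        get_profile=lambda v: 'Gold',
        filter_by_profile=lambda dss, p: [d for d in dss if d == 'ds1'],
        select_summary=lambda size, dss: dss[0])
    print(summary)  # -> ds1
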
diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py
index e2acd8f59..f582d8e82 100644
--- a/cinder/volume/drivers/vmware/volumeops.py
+++ b/cinder/volume/drivers/vmware/volumeops.py
@@ -222,6 +222,8 @@ class VMwareVolumeOps(object):
                 datastores = prop.val.ManagedObjectReference
             elif prop.name == 'parent':
                 compute_resource = prop.val
+        LOG.debug(_("Datastores attached to host %(host)s are: %(ds)s."),
+                  {'host': host, 'ds': datastores})
         # Filter datastores based on if it is accessible, mounted and writable
         valid_dss = []
         for datastore in datastores:
@@ -237,6 +239,8 @@ class VMwareVolumeOps(object):
             msg = _("There are no valid datastores attached to %s.") % host
             LOG.error(msg)
             raise error_util.VimException(msg)
+        else:
+            LOG.debug(_("Valid datastores are: %s"), valid_dss)
         return (valid_dss, resource_pool)
 
     def _get_parent(self, child, parent_type):
@@ -314,13 +318,15 @@ class VMwareVolumeOps(object):
         LOG.debug(_("Created child folder: %s.") % child_folder)
         return child_folder
 
-    def _get_create_spec(self, name, size_kb, disk_type, ds_name):
+    def _get_create_spec(self, name, size_kb, disk_type, ds_name,
+                         profileId=None):
         """Return spec for creating volume backing.
 
         :param name: Name of the backing
         :param size_kb: Size in KB of the backing
         :param disk_type: VMDK type for the disk
         :param ds_name: Datastore name where the disk is to be provisioned
+        :param profileId: storage profile ID for the backing
         :return: Spec for creation
         """
         cf = self._session.vim.client.factory
@@ -362,11 +368,16 @@ class VMwareVolumeOps(object):
         create_spec.deviceChange = [controller_spec, disk_spec]
         create_spec.files = vm_file_info
 
+        if profileId:
+            vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
+            vmProfile.profileId = profileId
+            create_spec.vmProfile = [vmProfile]
+
         LOG.debug(_("Spec for creating the backing: %s.") % create_spec)
         return create_spec
 
-    def create_backing(self, name, size_kb, disk_type,
-                       folder, resource_pool, host, ds_name):
+    def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
+                       host, ds_name, profileId=None):
         """Create backing for the volume.
 
         Creates a VM with one VMDK based on the given inputs.
@@ -378,17 +389,19 @@ class VMwareVolumeOps(object):
         :param resource_pool: Resource pool reference
         :param host: Host reference
         :param ds_name: Datastore name where the disk is to be provisioned
+        :param profileId: storage profile ID to be associated with backing
         :return: Reference to the created backing entity
         """
         LOG.debug(_("Creating volume backing name: %(name)s "
                     "disk_type: %(disk_type)s size_kb: %(size_kb)s at "
                     "folder: %(folder)s resource pool: %(resource_pool)s "
-                    "datastore name: %(ds_name)s.") %
+                    "datastore name: %(ds_name)s profileId: %(profile)s.") %
                   {'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
                    'folder': folder, 'resource_pool': resource_pool,
-                   'ds_name': ds_name})
+                   'ds_name': ds_name, 'profile': profileId})
 
-        create_spec = self._get_create_spec(name, size_kb, disk_type, ds_name)
+        create_spec = self._get_create_spec(name, size_kb, disk_type, ds_name,
+                                            profileId)
         task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
                                         folder, config=create_spec,
                                         pool=resource_pool, host=host)
@@ -729,3 +742,53 @@ class VMwareVolumeOps(object):
         LOG.debug(_("Initiated deleting vmdk file via task: %s.") % task)
         self._session.wait_for_task(task)
         LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path)
+
+    def get_all_profiles(self):
+        """Get all profiles defined in current VC.
+
+        :return: PbmProfile data objects from VC
+        """
+        LOG.debug(_("Get all profiles defined in current VC."))
+        pbm = self._session.pbm
+        profile_manager = pbm.service_content.profileManager
+        res_type = pbm.client.factory.create('ns0:PbmProfileResourceType')
+        res_type.resourceType = 'STORAGE'
+        profileIds = self._session.invoke_api(pbm, 'PbmQueryProfile',
+                                              profile_manager,
+                                              resourceType=res_type)
+        LOG.debug(_("Got profile IDs: %s"), profileIds)
+        return self._session.invoke_api(pbm, 'PbmRetrieveContent',
+                                        profile_manager,
+                                        profileIds=profileIds)
+
+    def retrieve_profile_id(self, profile_name):
+        """Get the profile uuid from current VC for given profile name.
+
+        :param profile_name: profile name as string
+        :return: profile id if a profile with the given name exists, None
+                 otherwise
+        """
+        LOG.debug(_("Trying to retrieve profile id for %s"), profile_name)
+        for profile in self.get_all_profiles():
+            if profile.name == profile_name:
+                profileId = profile.profileId
+                LOG.debug(_("Got profile id %(id)s for profile %(name)s."),
+                          {'id': profileId, 'name': profile_name})
+                return profileId
+
+    def filter_matching_hubs(self, hubs, profile_id):
+        """Filter and return only hubs that match the given profile.
+
+        :param hubs: PbmPlacementHub moref candidates
+        :param profile_id: profile id string
+        :return: subset of hubs that match the given profile_id
+        """
+        LOG.debug(_("Filtering hubs %(hubs)s that match profile "
+                    "%(profile)s."), {'hubs': hubs, 'profile': profile_id})
+        pbm = self._session.pbm
+        placement_solver = pbm.service_content.placementSolver
+        filtered_hubs = self._session.invoke_api(pbm, 'PbmQueryMatchingHub',
+                                                 placement_solver,
+                                                 hubsToSearch=hubs,
+                                                 profile=profile_id)
+        LOG.debug(_("Filtered hubs: %s"), filtered_hubs)
+        return filtered_hubs
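retrieve_profile_id() above scans the PbmProfile data objects returned
by PbmQueryProfile/PbmRetrieveContent and yields None when no name
matches. The same lookup in miniature, with unittest.mock standing in
for the PBM session; profile names and ids are illustrative:

    # Sketch of the profile-name -> profile-id lookup.
    from types import SimpleNamespace
    from unittest import mock

    def retrieve_profile_id(get_all_profiles, profile_name):
        # linear scan by name; None when no profile matches
        for profile in get_all_profiles():
            if profile.name == profile_name:
                return profile.profileId
        return None

    profiles = [SimpleNamespace(name='Gold', profileId='uuid-1'),
                SimpleNamespace(name='Silver', profileId='uuid-2')]
    get_all = mock.MagicMock(return_value=profiles)
    assert retrieve_profile_id(get_all, 'Gold') == 'uuid-1'
    assert retrieve_profile_id(get_all, 'Bronze') is None
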
diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample
index 73de85242..abf31a02e 100644
--- a/etc/cinder/cinder.conf.sample
+++ b/etc/cinder/cinder.conf.sample
@@ -1700,6 +1700,12 @@
 # less than the configured value. (integer value)
 #vmware_max_objects_retrieval=100
 
+# PBM service WSDL file location URL. e.g.
+# file:///opt/SDK/spbm/wsdl/pbmService.wsdl. Not setting this
+# will disable storage policy based placement of volumes.
+# (string value)
+#vmware_pbm_wsdl=
+
 
 #
 # Options defined in cinder.volume.drivers.windows.windows
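The tests in this change patch the driver's session and volumeops
properties with mock.PropertyMock. The pattern in miniature, runnable
with the standard library alone; the Driver class and return values are
stand-ins:

    # Sketch of the PropertyMock test pattern used above.
    from unittest import mock

    class Driver(object):
        @property
        def volumeops(self):
            raise RuntimeError("would contact vCenter")

    @mock.patch.object(Driver, 'volumeops', new_callable=mock.PropertyMock)
    def demo(volumeops):
        # volumeops is the PropertyMock; its return_value is what the
        # property yields inside the code under test.
        ops = volumeops.return_value
        ops.get_dc.return_value = 'datacenter-2'
        drv = Driver()
        print(drv.volumeops.get_dc('rp'))  # -> datacenter-2
        ops.get_dc.assert_called_once_with('rp')

    demo()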