Test suite for VMware VMDK driver.
"""
+import mock
import mox
from cinder import exception
"""Test get_volume_stats."""
stats = self._driver.get_volume_stats()
self.assertEqual(stats['vendor_name'], 'VMware')
- self.assertEqual(stats['driver_version'], '1.1.0')
+ self.assertEqual(stats['driver_version'], self._driver.VERSION)
self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
self.assertEqual(stats['reserved_percentage'], 0)
self.assertEqual(stats['total_capacity_gb'], 'unknown')
m.UnsetStubs()
m.VerifyAll()
- def test_get_folder_ds_summary(self):
+ @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
+ 'session', new_callable=mock.PropertyMock)
+ @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
+ 'volumeops', new_callable=mock.PropertyMock)
+ def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
- m = self.mox
- m.StubOutWithMock(self._driver.__class__, 'volumeops')
- self._driver.volumeops = self._volumeops
- size = 1
- resource_pool = FakeMor('ResourcePool', 'my_rp')
- datacenter = FakeMor('Datacenter', 'my_dc')
- m.StubOutWithMock(self._volumeops, 'get_dc')
- self._volumeops.get_dc(resource_pool).AndReturn(datacenter)
- m.StubOutWithMock(self._driver, '_get_volume_group_folder')
- folder = FakeMor('Folder', 'my_fol')
- self._driver._get_volume_group_folder(datacenter).AndReturn(folder)
- m.StubOutWithMock(self._driver, '_select_datastore_summary')
- size = 1
- datastores = [FakeMor('Datastore', 'my_ds')]
- self._driver._select_datastore_summary(size * units.GiB, datastores)
-
- m.ReplayAll()
- self._driver._get_folder_ds_summary(size, resource_pool, datastores)
- m.UnsetStubs()
- m.VerifyAll()
+ volumeops = volumeops.return_value
+ driver = self._driver
+ volume = {'size': 10, 'volume_type_id': 'fake_type'}
+ rp = mock.sentinel.resource_pool
+ dss = mock.sentinel.datastores
+ # patch method calls from _get_folder_ds_summary
+ volumeops.get_dc.return_value = mock.sentinel.dc
+ volumeops.get_vmfolder.return_value = mock.sentinel.folder
+ driver._get_storage_profile = mock.MagicMock()
+ driver._select_datastore_summary = mock.MagicMock()
+ driver._select_datastore_summary.return_value = mock.sentinel.summary
+ # call _get_folder_ds_summary
+ (folder, datastore_summary) = driver._get_folder_ds_summary(volume,
+ rp, dss)
+ # verify returned values and calls made
+ self.assertEqual(mock.sentinel.folder, folder,
+ "Folder returned is wrong.")
+ self.assertEqual(mock.sentinel.summary, datastore_summary,
+ "Datastore summary returned is wrong.")
+ volumeops.get_dc.assert_called_once_with(rp)
+ volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
+ driver._get_storage_profile.assert_called_once_with(volume)
+ size = volume['size'] * units.GiB
+ driver._select_datastore_summary.assert_called_once_with(size, dss)
def test_get_disk_type(self):
"""Test _get_disk_type."""
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1)
- self._driver._get_folder_ds_summary(volume['size'], resource_pool,
+ self._driver._get_folder_ds_summary(volume, resource_pool,
datastores).AndReturn((folder,
summary))
backing = FakeMor('VirtualMachine', 'my_back')
volume['size'] * units.MiB,
mox.IgnoreArg(), folder,
resource_pool, host,
+ mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(backing)
m.ReplayAll()
(host, rp, folder, summary) = (FakeObject(), FakeObject(),
FakeObject(), FakeObject())
summary.name = "datastore-1"
- m.StubOutWithMock(self._driver, '_select_ds_for_volume')
- self._driver._select_ds_for_volume(size_gb).AndReturn((host, rp,
- folder,
- summary))
- # _get_disk_type call
vol_name = 'volume name'
volume = FakeObject()
volume['name'] = vol_name
volume['size'] = size_gb
volume['volume_type_id'] = None # _get_disk_type will return 'thin'
disk_type = 'thin'
+ m.StubOutWithMock(self._driver, '_select_ds_for_volume')
+ self._driver._select_ds_for_volume(volume).AndReturn((host, rp,
+ folder,
+ summary))
+
# _get_create_spec call
m.StubOutWithMock(self._volumeops, '_get_create_spec')
self._volumeops._get_create_spec(vol_name, 0, disk_type,
class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
"""Test class for VMwareVcVmdkDriver."""
+ PBM_WSDL = '/fake/wsdl/path'
+
def setUp(self):
super(VMwareVcVmdkDriverTestCase, self).setUp()
+ self.flags(vmware_pbm_wsdl=self.PBM_WSDL)
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config)
def test_init_conn_with_instance_and_backing(self):
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1, datastore1)
size = 1
- self._driver._get_folder_ds_summary(size, resource_pool,
+ self._driver._get_folder_ds_summary(volume, resource_pool,
[datastore1]).AndReturn((folder,
summary))
m.StubOutWithMock(self._volumeops, 'relocate_backing')
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
- m.StubOutWithMock(self._volumeops, 'get_host')
backing = FakeMor('VirtualMachine', 'my_vm')
- host = FakeMor('HostSystem', 'my_host')
- self._volumeops.get_host(backing).AndReturn(host)
- m.StubOutWithMock(self._volumeops, 'get_dss_rp')
datastore = FakeMor('Datastore', 'my_ds')
- datastores = [datastore]
- resource_pool = FakeMor('ResourcePool', 'my_rp')
- self._volumeops.get_dss_rp(host).AndReturn((datastores,
- resource_pool))
- m.StubOutWithMock(self._driver, '_select_datastore_summary')
+ m.StubOutWithMock(self._driver, '_select_ds_for_volume')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['size'] = 1
summary = FakeDatastoreSummary(1, 1, datastore=datastore)
- self._driver._select_datastore_summary(volume['size'] * units.GiB,
- datastores).AndReturn(summary)
+ self._driver._select_ds_for_volume(volume).AndReturn((_, _, _,
+ summary))
m.StubOutWithMock(self._volumeops, 'clone_backing')
self._volumeops.clone_backing(volume['name'], backing,
mox.IgnoreArg(),
self._driver.create_cloned_volume, volume, src_vref)
m.UnsetStubs()
m.VerifyAll()
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
+ def test_get_storage_profile(self, get_volume_type_extra_specs):
+ """Test vmdk _get_storage_profile."""
+
+ # Test volume with no type id returns None
+ volume = FakeObject()
+ volume['volume_type_id'] = None
+ sp = self._driver._get_storage_profile(volume)
+ self.assertEqual(None, sp, "Without a volume_type_id no storage "
+ "profile should be returned.")
+
+ # Test volume with type id calls extra specs
+ fake_id = 'fake_volume_id'
+ volume['volume_type_id'] = fake_id
+ self._driver._get_storage_profile(volume)
+ spec_key = 'vmware:storage_profile'
+ get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
+
+ @mock.patch('cinder.volume.drivers.vmware.vim_util.'
+ 'convert_datastores_to_hubs')
+ @mock.patch('cinder.volume.drivers.vmware.vim_util.'
+ 'convert_hubs_to_datastores')
+ @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+ 'session', new_callable=mock.PropertyMock)
+ @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+ 'volumeops', new_callable=mock.PropertyMock)
+ def test_filter_ds_by_profile(self, volumeops, session, hubs_to_ds,
+ ds_to_hubs):
+ """Test vmdk _filter_ds_by_profile() method."""
+
+ volumeops = volumeops.return_value
+ session = session.return_value
+
+ # Test with no profile id
+ datastores = [mock.sentinel.ds1, mock.sentinel.ds2]
+ profile = 'fake_profile'
+ volumeops.retrieve_profile_id.return_value = None
+ self.assertRaises(error_util.VimException,
+ self._driver._filter_ds_by_profile,
+ datastores, profile)
+ volumeops.retrieve_profile_id.assert_called_once_with(profile)
+
+ # Test with a fake profile id
+ profileId = 'fake_profile_id'
+ filtered_dss = [mock.sentinel.ds1]
+ # patch method calls from _filter_ds_by_profile
+ volumeops.retrieve_profile_id.return_value = profileId
+ pbm_cf = mock.sentinel.pbm_cf
+ session.pbm.client.factory = pbm_cf
+ hubs = [mock.sentinel.hub1, mock.sentinel.hub2]
+ ds_to_hubs.return_value = hubs
+ volumeops.filter_matching_hubs.return_value = mock.sentinel.hubs
+ hubs_to_ds.return_value = filtered_dss
+ # call _filter_ds_by_profile with a fake profile
+ actual_dss = self._driver._filter_ds_by_profile(datastores, profile)
+ # verify return value and called methods
+ self.assertEqual(filtered_dss, actual_dss,
+ "Wrong filtered datastores returned.")
+ ds_to_hubs.assert_called_once_with(pbm_cf, datastores)
+ volumeops.filter_matching_hubs.assert_called_once_with(hubs,
+ profileId)
+ hubs_to_ds.assert_called_once_with(mock.sentinel.hubs, datastores)
+
+ @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+ 'session', new_callable=mock.PropertyMock)
+ @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
+ 'volumeops', new_callable=mock.PropertyMock)
+ def test_get_folder_ds_summary(self, volumeops, session):
+ """Test _get_folder_ds_summary."""
+ volumeops = volumeops.return_value
+ driver = self._driver
+ driver._storage_policy_enabled = True
+ volume = {'size': 10, 'volume_type_id': 'fake_type'}
+ rp = mock.sentinel.resource_pool
+ dss = [mock.sentinel.datastore1, mock.sentinel.datastore2]
+ filtered_dss = [mock.sentinel.datastore1]
+ profile = mock.sentinel.profile
+
+ def filter_ds(datastores, storage_profile):
+ return filtered_dss
+
+ # patch method calls from _get_folder_ds_summary
+ volumeops.get_dc.return_value = mock.sentinel.dc
+ volumeops.get_vmfolder.return_value = mock.sentinel.vmfolder
+ volumeops.create_folder.return_value = mock.sentinel.folder
+ driver._get_storage_profile = mock.MagicMock()
+ driver._get_storage_profile.return_value = profile
+ driver._filter_ds_by_profile = mock.MagicMock(side_effect=filter_ds)
+ driver._select_datastore_summary = mock.MagicMock()
+ driver._select_datastore_summary.return_value = mock.sentinel.summary
+ # call _get_folder_ds_summary
+ (folder, datastore_summary) = driver._get_folder_ds_summary(volume,
+ rp, dss)
+ # verify returned values and calls made
+ self.assertEqual(mock.sentinel.folder, folder,
+ "Folder returned is wrong.")
+ self.assertEqual(mock.sentinel.summary, datastore_summary,
+ "Datastore summary returned is wrong.")
+ volumeops.get_dc.assert_called_once_with(rp)
+ volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
+ volumeops.create_folder.assert_called_once_with(mock.sentinel.vmfolder,
+ self.VOLUME_FOLDER)
+ driver._get_storage_profile.assert_called_once_with(volume)
+ driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
+ size = volume['size'] * units.GiB
+ driver._select_datastore_summary.assert_called_once_with(size,
+ filtered_dss)
resource_pool, host, ds_name)
self.assertEqual(mock.sentinel.result, ret)
get_create_spec.assert_called_once_with(name, size_kb, disk_type,
- ds_name)
+ ds_name, None)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
-# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import pbm
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
@Retry(exceptions=(Exception))
def __init__(self, server_ip, server_username, server_password,
api_retry_count, task_poll_interval, scheme='https',
- create_session=True, wsdl_loc=None):
+ create_session=True, wsdl_loc=None, pbm_wsdl=None):
"""Constructs session object.
:param server_ip: IP address of ESX/VC server
:param scheme: http or https protocol
:param create_session: Boolean whether to set up connection at the
time of instance creation
- :param wsdl_loc: WSDL file location for invoking SOAP calls on server
- using suds
+ :param wsdl_loc: VIM WSDL file location for invoking SOAP calls on
+ server using suds
+ :param pbm_wsdl: PBM WSDL file location. If set to None the storage
+ policy related functionality will be disabled.
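+
+        Example (illustrative values; the PBM WSDL path mirrors the
+        sample given for the vmware_pbm_wsdl option):
+
+            session = VMwareAPISession(
+                '10.20.30.40', 'admin', 'secret',
+                api_retry_count=10, task_poll_interval=5,
+                pbm_wsdl='file:///opt/SDK/spbm/wsdl/pbmService.wsdl')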
"""
self._server_ip = server_ip
self._server_username = server_username
self._scheme = scheme
self._session_id = None
self._vim = None
+ self._pbm_wsdl = pbm_wsdl
+ self._pbm = None
if create_session:
self.create_session()
wsdl_loc=self._wsdl_loc)
return self._vim
+ @property
+ def pbm(self):
+ if not self._pbm and self._pbm_wsdl:
+ self._pbm = pbm.PBMClient(self.vim, self._pbm_wsdl,
+ protocol=self._scheme,
+ host=self._server_ip)
+ return self._pbm
+
def create_session(self):
"""Establish session with the server."""
# Login and setup the session with the server for making
LOG.exception(_("Error while terminating session: %s.") %
excep)
self._session_id = session.key
+ if self.pbm:
+ self.pbm.set_cookie()
LOG.info(_("Successfully established connection to the server."))
def __del__(self):
- """Logs-out the session."""
+ """Logs-out the sessions."""
try:
self.vim.Logout(self.vim.service_content.sessionManager)
except Exception as excep:
- LOG.exception(_("Error while logging out the user: %s.") %
+ LOG.exception(_("Error while logging out from vim session: %s."),
excep)
+ if self._pbm:
+ try:
+ self.pbm.Logout(self.pbm.service_content.sessionManager)
+ except Exception as excep:
+ LOG.exception(_("Error while logging out from pbm session: "
+ "%s."), excep)
def invoke_api(self, module, method, *args, **kwargs):
"""Wrapper method for invoking APIs.
--- /dev/null
+++ b/cinder/volume/drivers/vmware/pbm.py
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Class for making VMware PBM SOAP calls.
+
+This is used for storage policy based placement of volumes. Read more about
+it here:
+http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vspsdk.apiref.doc/\
+right-pane.html
+"""
+
+import suds
+import suds.sax.element as element
+
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.vmware import vim as vim_module
+from cinder.volume.drivers.vmware import vim_util
+
+LOG = logging.getLogger(__name__)
+SERVICE_INSTANCE = 'ServiceInstance'
+SERVICE_TYPE = 'PbmServiceInstance'
+
+
+class PBMClient(vim_module.Vim):
+ """Sets up a client to interact with the vSphere PBM APIs.
+
+    This client piggybacks on the Vim object's authenticated session
+    cookie to invoke PBM API calls.
+
+ Note that this class needs the PBM wsdl file in order to make SOAP API
+ calls. This wsdl file is included in the VMware Storage Policy SDK.
+ A user of this feature needs to install this SDK on the Cinder volume
+ nodes and configure the path in the cinder.conf file.
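+
+    Usage sketch (illustrative; vim_session is an authenticated vim.Vim
+    object and the WSDL location mirrors the sample cinder.conf):
+
+        pbm = PBMClient(vim_session,
+                        'file:///opt/SDK/spbm/wsdl/pbmService.wsdl',
+                        protocol='https', host='10.20.30.40')
+        profile_mgr = pbm.service_content.profileManager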
+ """
+
+ def __init__(self, vimSession, pbm_wsdl, protocol='https',
+ host='localhost'):
+ """Constructs a PBM client object.
+
+        :param vimSession: an authenticated vim.Vim object
+        :param pbm_wsdl: URL path where the pbmService.wsdl file is located.
+ :param protocol: http or https
+ :param host: Server IPAddress[:port] or Hostname[:port]
+ """
+ self._vimSession = vimSession
+ self._url = vim_util.get_soap_url(protocol, host, 'pbm')
+ # create the pbm client
+ self._client = suds.client.Client(pbm_wsdl, location=self._url)
+ PBMClient._copy_client_cookie(self._vimSession, self._client)
+ # Get the PBM service content
+ si_moref = vim_module.get_moref(SERVICE_INSTANCE, SERVICE_TYPE)
+ self._sc = self._client.service.PbmRetrieveServiceContent(si_moref)
+
+ @staticmethod
+ def _copy_client_cookie(vimSession, pbmClient):
+ """Copy the vim session cookie to pbm client soap header.
+
+        :param vimSession: a vim session authenticated with VC/ESX
+ :param pbmClient: a PBMClient object to set the session cookie
+ """
+ vcSessionCookie = PBMClient._get_vc_session_cookie(vimSession)
+ vcc = element.Element('vcSessionCookie').setText(vcSessionCookie)
+ pbmClient.set_options(soapheaders=vcc)
+
+ @staticmethod
+ def _get_vc_session_cookie(vimSession):
+ """Look for vmware_soap_session cookie in vimSession."""
+ cookies = vimSession.client.options.transport.cookiejar
+ for c in cookies:
+ if c.name.lower() == 'vmware_soap_session':
+ return c.value
+
+ @property
+ def service_content(self):
+ return self._sc
+
+ @property
+ def client(self):
+ return self._client
+
+ def set_cookie(self):
+ """Set the authenticated vim session cookie in this pbm client."""
+ PBMClient._copy_client_cookie(self._vimSession, self.client)
"""
import httplib
+
import suds
from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import vim_util
RESP_NOT_XML_ERROR = "Response is 'text/html', not 'text/xml'"
CONN_ABORT_ERROR = 'Software caused connection abort'
self._host_name = host
if not wsdl_loc:
wsdl_loc = Vim._get_wsdl_loc(protocol, host)
- soap_url = Vim._get_soap_url(protocol, host)
+ soap_url = vim_util.get_soap_url(protocol, host)
self._client = suds.client.Client(wsdl_loc, location=soap_url,
plugins=[VIMMessagePlugin()])
self._service_content = self.RetrieveServiceContent('ServiceInstance')
:param host_name: ESX/VC server host name
:return: Default WSDL file location hosted at the server
"""
- return '%s://%s/sdk/vimService.wsdl' % (protocol, host_name)
-
- @staticmethod
- def _get_soap_url(protocol, host_name):
- """Return URL to SOAP services for ESX/VC server.
-
- :param protocol: https or http
- :param host_name: ESX/VC server host name
- :return: URL to SOAP services for ESX/VC server
- """
- return '%s://%s/sdk' % (protocol, host_name)
+ return vim_util.get_soap_url(protocol, host_name) + '/vimService.wsdl'
@property
def service_content(self):
The VMware API utility module.
"""
+import netaddr
+
+
+def get_soap_url(protocol, host, path='sdk'):
+ """Return URL to SOAP services for ESX/VC server.
+
+ :param protocol: https or http
+ :param host: ESX/VC server host IP
+ :param path: path part of the SOAP URL
+ :return: URL to SOAP services for ESX/VC server
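+
+    Example (illustrative addresses):
+
+    >>> get_soap_url('https', '10.20.30.40')
+    'https://10.20.30.40/sdk'
+    >>> get_soap_url('https', 'fd00::12')
+    'https://[fd00::12]/sdk'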
+ """
+ if netaddr.valid_ipv6(host):
+ return '%s://[%s]/%s' % (protocol, host, path)
+ return '%s://%s/%s' % (protocol, host, path)
+
def build_selection_spec(client_factory, name):
"""Builds the selection spec.
if prop:
prop_val = prop[0].val
return prop_val
+
+
+def convert_datastores_to_hubs(pbm_client_factory, datastores):
+ """Convert Datastore morefs to PbmPlacementHub morefs.
+
+ :param pbm_client_factory: pbm client factory
+ :param datastores: list of datastore morefs
+ :returns: list of PbmPlacementHub morefs
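+
+    For example (hypothetical moref value), a datastore moref whose
+    ``value`` is 'datastore-10' maps to a PbmPlacementHub with
+    hubId 'datastore-10' and hubType 'Datastore'.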
+ """
+ hubs = []
+ for ds in datastores:
+ hub = pbm_client_factory.create('ns0:PbmPlacementHub')
+ hub.hubId = ds.value
+ hub.hubType = 'Datastore'
+ hubs.append(hub)
+ return hubs
+
+
+def convert_hubs_to_datastores(hubs, datastores):
+ """Get filtered subset of datastores as represented by hubs.
+
+    :param hubs: represents a subset of datastore ids
+    :param datastores: represents all candidate datastores
+    :returns: the subset of datastores that are also present in hubs
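+
+    For example (hypothetical moref values), if hubs contains only a hub
+    with hubId 'datastore-10', just the candidate datastores whose
+    ``value`` is 'datastore-10' are returned.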
+ """
+ hubIds = [hub.hubId for hub in hubs]
+ filtered_dss = [ds for ds in datastores if ds.value in hubIds]
+ return filtered_dss
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
+from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import vmware_images
from cinder.volume.drivers.vmware import volumeops
from cinder.volume import volume_types
'Query results will be obtained in batches from the '
'server and not in one shot. Server may still limit the '
'count to something less than the configured value.'),
+ cfg.StrOpt('vmware_pbm_wsdl',
+ help='PBM service WSDL file location URL. '
+ 'e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl. '
+ 'Not setting this will disable storage policy based '
+ 'placement of volumes.'),
]
CONF = cfg.CONF
CONF.register_opts(vmdk_opts)
-def _get_volume_type_extra_spec(type_id, spec_key, possible_values,
- default_value):
+def _get_volume_type_extra_spec(type_id, spec_key, possible_values=None,
+ default_value=None):
"""Get extra spec value.
If the spec value is not present in the input possible_values, then
:param type_id: Volume type ID
:param spec_key: Extra spec key
- :param possible_values: Permitted values for the extra spec
+ :param possible_values: Permitted values for the extra spec if known
:param default_value: Default value for the extra spec in case of an
invalid value or if the entry does not exist
:return: extra spec value
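+
+    For example (hypothetical values),
+    _get_volume_type_extra_spec(type_id, 'vmdk_type',
+                                ['thin', 'thick'], 'thin')
+    looks up the extra spec 'vmware:vmdk_type' and returns 'thin' when the
+    spec is missing or not among the permitted values.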
"""
- if type_id:
- spec_key = ('vmware:%s') % spec_key
- spec_value = volume_types.get_volume_type_extra_specs(type_id,
- spec_key)
- if spec_value in possible_values:
- LOG.debug(_("Returning spec value %s") % spec_value)
- return spec_value
+ if not type_id:
+ return default_value
- LOG.debug(_("Invalid spec value: %s specified.") % spec_value)
+ spec_key = ('vmware:%s') % spec_key
+ spec_value = volume_types.get_volume_type_extra_specs(type_id,
+ spec_key)
+ if not spec_value:
+ LOG.debug(_("Returning default spec value: %s.") % default_value)
+ return default_value
- # Default we return thin disk type
- LOG.debug(_("Returning default spec value: %s.") % default_value)
- return default_value
+ if possible_values is None:
+ return spec_value
+
+ if spec_value in possible_values:
+ LOG.debug(_("Returning spec value %s") % spec_value)
+ return spec_value
+
+ LOG.debug(_("Invalid spec value: %s specified.") % spec_value)
class VMwareEsxVmdkDriver(driver.VolumeDriver):
"""Manage volumes on VMware ESX server."""
- VERSION = '1.1.0'
+ # 1.0 - initial version of driver
+ # 1.1.0 - selection of datastore based on number of host mounts
+ # 1.2.0 - storage profile volume types based placement of volumes
+ VERSION = '1.2.0'
def __init__(self, *args, **kwargs):
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
self._session = None
self._stats = None
self._volumeops = None
+ # No storage policy based placement possible when connecting
+ # directly to ESX
+ self._storage_policy_enabled = False
@property
def session(self):
{'datastore': best_summary, 'host_count': max_host_count})
return best_summary
- def _get_folder_ds_summary(self, size_gb, resource_pool, datastores):
+ def _get_storage_profile(self, volume):
+ """Get storage profile associated with this volume's volume_type.
+
+ :param volume: volume whose storage profile should be queried
+ :return: string value of storage profile if volume type is associated,
+ None otherwise
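+
+        For example, a volume whose type carries the extra spec
+        'vmware:storage_profile' set to 'Gold' (a hypothetical profile
+        name) makes this method return 'Gold'.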
+ """
+ type_id = volume['volume_type_id']
+ if not type_id:
+ return
+ return _get_volume_type_extra_spec(type_id, 'storage_profile')
+
+ def _filter_ds_by_profile(self, datastores, storage_profile):
+ """Filter out datastores that do not match given storage profile.
+
+ :param datastores: list of candidate datastores
+ :param storage_profile: storage profile name required to be satisfied
+ :return: subset of datastores that match storage_profile, or empty list
+ if none of the datastores match
+ """
+ LOG.debug(_("Filter datastores matching storage profile %(profile)s: "
+ "%(dss)s."),
+ {'profile': storage_profile, 'dss': datastores})
+ profileId = self.volumeops.retrieve_profile_id(storage_profile)
+ if not profileId:
+ msg = _("No such storage profile '%s; is defined in vCenter.")
+ LOG.error(msg, storage_profile)
+ raise error_util.VimException(msg % storage_profile)
+ pbm_cf = self.session.pbm.client.factory
+ hubs = vim_util.convert_datastores_to_hubs(pbm_cf, datastores)
+ filtered_hubs = self.volumeops.filter_matching_hubs(hubs, profileId)
+ return vim_util.convert_hubs_to_datastores(filtered_hubs, datastores)
+
+ def _get_folder_ds_summary(self, volume, resource_pool, datastores):
"""Get folder and best datastore summary where volume can be placed.
- :param size_gb: Size of the volume in GB
+ :param volume: volume to place into one of the datastores
:param resource_pool: Resource pool reference
:param datastores: Datastores from which a choice is to be made
for the volume
:return: Folder and best datastore summary where volume can be
- placed on
+                 placed.
"""
datacenter = self.volumeops.get_dc(resource_pool)
folder = self._get_volume_group_folder(datacenter)
- size_bytes = size_gb * units.GiB
+ storage_profile = self._get_storage_profile(volume)
+ if self._storage_policy_enabled and storage_profile:
+ LOG.debug(_("Storage profile required for this volume: %s."),
+ storage_profile)
+ datastores = self._filter_ds_by_profile(datastores,
+ storage_profile)
+ if not datastores:
+ msg = _("Aborting since none of the datastores match the "
+ "given storage profile %s.")
+ LOG.error(msg, storage_profile)
+ raise error_util.VimException(msg % storage_profile)
+ elif storage_profile:
+ LOG.warn(_("Ignoring storage profile %s requirement for this "
+ "volume since policy based placement is "
+ "disabled."), storage_profile)
+
+ size_bytes = volume['size'] * units.GiB
datastore_summary = self._select_datastore_summary(size_bytes,
datastores)
return (folder, datastore_summary)
# Get datastores and resource pool of the host
(datastores, resource_pool) = self.volumeops.get_dss_rp(host)
# Pick a folder and datastore to create the volume backing on
- (folder, summary) = self._get_folder_ds_summary(volume['size'],
+ (folder, summary) = self._get_folder_ds_summary(volume,
resource_pool,
datastores)
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
size_kb = volume['size'] * units.MiB
+ storage_profile = self._get_storage_profile(volume)
+ profileId = None
+ if self._storage_policy_enabled and storage_profile:
+ profile = self.volumeops.retrieve_profile_id(storage_profile)
+ if profile:
+ profileId = profile.uniqueId
return self.volumeops.create_backing(volume['name'],
size_kb,
disk_type, folder,
resource_pool,
host,
- summary.name)
+ summary.name,
+ profileId)
- def _relocate_backing(self, size_gb, backing, host):
+ def _relocate_backing(self, volume, backing, host):
pass
- def _select_ds_for_volume(self, size_gb):
+ def _select_ds_for_volume(self, volume):
"""Select datastore that can accommodate a volume of given size.
Returns the selected datastore summary along with a compute host and
host = host.obj
try:
(dss, rp) = self.volumeops.get_dss_rp(host)
- (folder, summary) = self._get_folder_ds_summary(size_gb,
+ (folder, summary) = self._get_folder_ds_summary(volume,
rp, dss)
selected_host = host
break
LOG.warn(_("Unable to find suitable datastore for volume "
"of size: %(vol)s GB under host: %(host)s. "
"More details: %(excep)s") %
- {'vol': size_gb,
+ {'vol': volume['size'],
'host': host.obj, 'excep': excep})
if selected_host:
self.volumeops.cancel_retrieval(retrv_result)
retrv_result = self.volumeops.continue_retrieval(retrv_result)
msg = _("Unable to find host to accommodate a disk of size: %s "
- "in the inventory.") % size_gb
+ "in the inventory.") % volume['size']
LOG.error(msg)
raise error_util.VimException(msg)
backing = self._create_backing(volume, host)
else:
# Relocating the volume is necessary
- self._relocate_backing(volume['size'], backing, host)
+ self._relocate_backing(volume, backing, host)
else:
# The instance does not exist
LOG.debug(_("The instance for which initialize connection "
"""
try:
# find host in which to create the volume
- size_gb = volume['size']
- (host, rp, folder, summary) = self._select_ds_for_volume(size_gb)
+ (host, rp, folder, summary) = self._select_ds_for_volume(volume)
except error_util.VimException as excep:
LOG.exception(_("Exception in _select_ds_for_volume: %s.") % excep)
raise excep
+ size_gb = volume['size']
LOG.debug(_("Selected datastore %(ds)s for new volume of size "
"%(size)s GB.") % {'ds': summary.name, 'size': size_gb})
class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
"""Manage volumes on VMware VC server."""
+ def __init__(self, *args, **kwargs):
+ super(VMwareVcVmdkDriver, self).__init__(*args, **kwargs)
+ self._session = None
+
+ @property
+ def session(self):
+ if not self._session:
+ ip = self.configuration.vmware_host_ip
+ username = self.configuration.vmware_host_username
+ password = self.configuration.vmware_host_password
+ api_retry_count = self.configuration.vmware_api_retry_count
+ task_poll_interval = self.configuration.vmware_task_poll_interval
+ wsdl_loc = self.configuration.safe_get('vmware_wsdl_location')
+ pbm_wsdl = self.configuration.vmware_pbm_wsdl
+ self._session = api.VMwareAPISession(ip, username,
+ password, api_retry_count,
+ task_poll_interval,
+ wsdl_loc=wsdl_loc,
+ pbm_wsdl=pbm_wsdl)
+ if pbm_wsdl:
+ self._storage_policy_enabled = True
+ return self._session
+
def _get_volume_group_folder(self, datacenter):
"""Get volume group folder.
volume_folder = self.configuration.vmware_volume_folder
return self.volumeops.create_folder(vm_folder, volume_folder)
- def _relocate_backing(self, size_gb, backing, host):
+ def _relocate_backing(self, volume, backing, host):
"""Relocate volume backing under host and move to volume_group folder.
If the volume backing is on a datastore that is visible to the host,
then need not do any operation.
- :param size_gb: Size of the volume in GB
+ :param volume: volume to be relocated
:param backing: Reference to the backing
:param host: Reference to the host
"""
# host managing the instance. We relocate the volume's backing.
# Pick a folder and datastore to relocate volume backing to
- (folder, summary) = self._get_folder_ds_summary(size_gb, resource_pool,
+ (folder, summary) = self._get_folder_ds_summary(volume,
+ resource_pool,
datastores)
LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
{'backing': backing, 'ds': summary, 'rp': resource_pool})
"""
datastore = None
if not clone_type == volumeops.LINKED_CLONE_TYPE:
- # Pick a datastore where to create the full clone under same host
- host = self.volumeops.get_host(backing)
- (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
- size_bytes = volume['size'] * units.GiB
- datastore = self._select_datastore_summary(size_bytes,
- datastores).datastore
+            # Pick a datastore on any suitable host to create the full clone
+ (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+ datastore = summary.datastore
clone = self.volumeops.clone_backing(volume['name'], backing,
snapshot, clone_type, datastore)
LOG.info(_("Successfully created clone: %s.") % clone)
datastores = prop.val.ManagedObjectReference
elif prop.name == 'parent':
compute_resource = prop.val
+ LOG.debug(_("Datastores attached to host %(host)s are: %(ds)s."),
+ {'host': host, 'ds': datastores})
# Filter datastores based on if it is accessible, mounted and writable
valid_dss = []
for datastore in datastores:
msg = _("There are no valid datastores attached to %s.") % host
LOG.error(msg)
raise error_util.VimException(msg)
+ else:
+ LOG.debug(_("Valid datastores are: %s"), valid_dss)
return (valid_dss, resource_pool)
def _get_parent(self, child, parent_type):
LOG.debug(_("Created child folder: %s.") % child_folder)
return child_folder
- def _get_create_spec(self, name, size_kb, disk_type, ds_name):
+ def _get_create_spec(self, name, size_kb, disk_type, ds_name,
+ profileId=None):
"""Return spec for creating volume backing.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param ds_name: Datastore name where the disk is to be provisioned
+ :param profileId: storage profile ID for the backing
:return: Spec for creation
"""
cf = self._session.vim.client.factory
create_spec.deviceChange = [controller_spec, disk_spec]
create_spec.files = vm_file_info
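+        # Associate the backing with the given storage profile (if any)
+        # so vCenter applies the policy when the VM is created.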
+ if profileId:
+ vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
+ vmProfile.profileId = profileId
+ create_spec.vmProfile = [vmProfile]
+
LOG.debug(_("Spec for creating the backing: %s.") % create_spec)
return create_spec
- def create_backing(self, name, size_kb, disk_type,
- folder, resource_pool, host, ds_name):
+ def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
+ host, ds_name, profileId=None):
"""Create backing for the volume.
Creates a VM with one VMDK based on the given inputs.
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Datastore name where the disk is to be provisioned
+ :param profileId: storage profile ID to be associated with backing
:return: Reference to the created backing entity
"""
LOG.debug(_("Creating volume backing name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s at "
"folder: %(folder)s resourse pool: %(resource_pool)s "
- "datastore name: %(ds_name)s.") %
+ "datastore name: %(ds_name)s profileId: %(profile)s.") %
{'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
'folder': folder, 'resource_pool': resource_pool,
- 'ds_name': ds_name})
+ 'ds_name': ds_name, 'profile': profileId})
- create_spec = self._get_create_spec(name, size_kb, disk_type, ds_name)
+ create_spec = self._get_create_spec(name, size_kb, disk_type, ds_name,
+ profileId)
task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
folder, config=create_spec,
pool=resource_pool, host=host)
LOG.debug(_("Initiated deleting vmdk file via task: %s.") % task)
self._session.wait_for_task(task)
LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path)
+
+ def get_all_profiles(self):
+ """Get all profiles defined in current VC.
+
+ :return: PbmProfile data objects from VC
+ """
+ LOG.debug(_("Get all profiles defined in current VC."))
+ pbm = self._session.pbm
+ profile_manager = pbm.service_content.profileManager
+ res_type = pbm.client.factory.create('ns0:PbmProfileResourceType')
+ res_type.resourceType = 'STORAGE'
+ profileIds = self._session.invoke_api(pbm, 'PbmQueryProfile',
+ profile_manager,
+ resourceType=res_type)
+ LOG.debug(_("Got profile IDs: %s"), profileIds)
+ return self._session.invoke_api(pbm, 'PbmRetrieveContent',
+ profile_manager,
+ profileIds=profileIds)
+
+ def retrieve_profile_id(self, profile_name):
+ """Get the profile uuid from current VC for given profile name.
+
+ :param profile_name: profile name as string
+        :return: PbmProfileId data object for the profile, None if no
+                 matching profile is found
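+
+        For example, if vCenter defines a profile named 'Gold'
+        (hypothetical), its PbmProfileId is returned; with no matching
+        profile the loop falls through and None is returned.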
+ """
+ LOG.debug(_("Trying to retrieve profile id for %s"), profile_name)
+ for profile in self.get_all_profiles():
+ if profile.name == profile_name:
+ profileId = profile.profileId
+ LOG.debug(_("Got profile id %(id)s for profile %(name)s."),
+ {'id': profileId, 'name': profile_name})
+ return profileId
+
+ def filter_matching_hubs(self, hubs, profile_id):
+ """Filter and return only hubs that match given profile.
+
+ :param hubs: PbmPlacementHub morefs candidates
+ :param profile_id: profile id string
+ :return: subset of hubs that match given profile_id
+ """
+ LOG.debug(_("Filtering hubs %(hubs)s that match profile "
+ "%(profile)s."), {'hubs': hubs, 'profile': profile_id})
+ pbm = self._session.pbm
+ placement_solver = pbm.service_content.placementSolver
+ filtered_hubs = self._session.invoke_api(pbm, 'PbmQueryMatchingHub',
+ placement_solver,
+ hubsToSearch=hubs,
+ profile=profile_id)
+ LOG.debug(_("Filtered hubs: %s"), filtered_hubs)
+ return filtered_hubs
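+
+    # Sketch of the overall placement flow (illustrative names; see
+    # VMwareVcVmdkDriver._filter_ds_by_profile for the real caller):
+    #     profile_id = ops.retrieve_profile_id('Gold')
+    #     hubs = vim_util.convert_datastores_to_hubs(pbm_factory, dss)
+    #     matching = ops.filter_matching_hubs(hubs, profile_id)
+    #     dss = vim_util.convert_hubs_to_datastores(matching, dss)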
# less than the configured value. (integer value)
#vmware_max_objects_retrieval=100
+# PBM service WSDL file location URL. e.g.
+# file:///opt/SDK/spbm/wsdl/pbmService.wsdl. Not setting this
+# will disable storage policy based placement of volumes.
+# (string value)
+#vmware_pbm_wsdl=<None>
+
#
# Options defined in cinder.volume.drivers.windows.windows