close.assert_called_once_with(fd)
delete_if_exists.assert_called_once_with(tmp)
+ @mock.patch.object(VMDK_DRIVER, 'volumeops')
+ @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+ def test_relocate_backing_nop(self, ds_sel, vops):
+ volume = {'name': 'vol-1', 'size': 1}
+
+ datastore = mock.sentinel.datastore
+ vops.get_datastore.return_value = datastore
+
+ profile = mock.sentinel.profile
+ vops.get_profile.return_value = profile
+
+ vops.is_datastore_accessible.return_value = True
+ ds_sel.is_datastore_compliant.return_value = True
+
+ backing = mock.sentinel.backing
+ host = mock.sentinel.host
+ self._driver._relocate_backing(volume, backing, host)
+
+ vops.is_datastore_accessible.assert_called_once_with(datastore, host)
+ ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
+ profile)
+ self.assertFalse(vops.relocate_backing.called)
+
+ @mock.patch.object(VMDK_DRIVER, 'volumeops')
+ @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+ def test_relocate_backing_with_no_datastore(
+ self, ds_sel, vops):
+ volume = {'name': 'vol-1', 'size': 1}
+
+ profile = mock.sentinel.profile
+ vops.get_profile.return_value = profile
+
+ vops.is_datastore_accessible.return_value = True
+ ds_sel.is_datastore_compliant.return_value = False
+
+ ds_sel.select_datastore.return_value = []
+
+ backing = mock.sentinel.backing
+ host = mock.sentinel.host
+
+ self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
+ self._driver._relocate_backing,
+ volume,
+ backing,
+ host)
+ ds_sel.select_datastore.assert_called_once_with(
+ {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
+ hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host])
+ self.assertFalse(vops.relocate_backing.called)
+
+ @mock.patch.object(VMDK_DRIVER, 'volumeops')
+ @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
+ @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+ def test_relocate_backing(
+ self, ds_sel, get_volume_group_folder, vops):
+ volume = {'name': 'vol-1', 'size': 1}
+
+ vops.is_datastore_accessible.return_value = False
+ ds_sel.is_datastore_compliant.return_value = True
+
+ backing = mock.sentinel.backing
+ host = mock.sentinel.host
+
+ rp = mock.sentinel.rp
+ datastore = mock.sentinel.datastore
+ summary = mock.Mock(datastore=datastore)
+ ds_sel.select_datastore.return_value = (host, rp, summary)
+
+ folder = mock.sentinel.folder
+ get_volume_group_folder.return_value = folder
+
+ self._driver._relocate_backing(volume, backing, host)
+
+ vops.relocate_backing.assert_called_once_with(backing,
+ datastore,
+ rp,
+ host)
+ vops.move_backing_to_folder.assert_called_once_with(backing,
+ folder)
+
class ImageDiskTypeTest(test.TestCase):
"""Unit tests for ImageDiskType."""
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([], hosts)
+ @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+ 'get_connected_hosts')
+ def test_is_datastore_accessible(self, get_connected_hosts):
+ host_1 = mock.sentinel.host_1
+ host_2 = mock.sentinel.host_2
+ get_connected_hosts.return_value = [host_1, host_2]
+
+ ds = mock.sentinel.datastore
+ host = mock.Mock(value=mock.sentinel.host_1)
+ self.assertTrue(self.vops.is_datastore_accessible(ds, host))
+ get_connected_hosts.assert_called_once_with(ds)
+
+ @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+ 'get_connected_hosts')
+ def test_is_datastore_accessible_with_inaccessible(self,
+ get_connected_hosts):
+ host_1 = mock.sentinel.host_1
+ get_connected_hosts.return_value = [host_1]
+
+ ds = mock.sentinel.datastore
+ host = mock.Mock(value=mock.sentinel.host_2)
+ self.assertFalse(self.vops.is_datastore_accessible(ds, host))
+ get_connected_hosts.assert_called_once_with(ds)
+
def test_is_valid(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
summary = mock.Mock(spec=object)
datacenter=dc_ref)
self.session.wait_for_task.assert_called_once_with(task)
+ def test_get_profile(self):
+ server_obj = mock.Mock()
+ self.session.pbm.client.factory.create.return_value = server_obj
+
+ profile_ids = [mock.sentinel.profile_id]
+ profile_name = mock.sentinel.profile_name
+ profile = mock.Mock()
+ profile.name = profile_name
+ self.session.invoke_api.side_effect = [profile_ids, [profile]]
+
+ value = mock.sentinel.value
+ backing = mock.Mock(value=value)
+ self.assertEqual(profile_name, self.vops.get_profile(backing))
+
+ pbm = self.session.pbm
+ profile_manager = pbm.service_content.profileManager
+ exp_calls = [mock.call(pbm, 'PbmQueryAssociatedProfile',
+ profile_manager, entity=server_obj),
+ mock.call(pbm, 'PbmRetrieveContent', profile_manager,
+ profileIds=profile_ids)]
+ self.assertEqual(exp_calls, self.session.invoke_api.call_args_list)
+
+ self.assertEqual(value, server_obj.key)
+ self.assertEqual('virtualMachine', server_obj.objectType)
+ self.session.invoke_api.side_effect = None
+
+ def test_get_profile_with_no_profile(self):
+ server_obj = mock.Mock()
+ self.session.pbm.client.factory.create.return_value = server_obj
+
+ self.session.invoke_api.side_effect = [[]]
+
+ value = mock.sentinel.value
+ backing = mock.Mock(value=value)
+ self.assertIsNone(self.vops.get_profile(backing))
+
+ pbm = self.session.pbm
+ profile_manager = pbm.service_content.profileManager
+ exp_calls = [mock.call(pbm, 'PbmQueryAssociatedProfile',
+ profile_manager, entity=server_obj)]
+ self.assertEqual(exp_calls, self.session.invoke_api.call_args_list)
+
+ self.assertEqual(value, server_obj.key)
+ self.assertEqual('virtualMachine', server_obj.objectType)
+ self.session.invoke_api.side_effect = None
+
def test_extend_virtual_disk(self):
"""Test volumeops.extend_virtual_disk."""
task = mock.sentinel.task
return self.volumeops.create_folder(vm_folder, volume_folder)
def _relocate_backing(self, volume, backing, host):
- """Relocate volume backing under host and move to volume_group folder.
+ """Relocate volume backing to a datastore accessible to the given host.
- If the volume backing is on a datastore that is visible to the host,
- then need not do any operation.
+ The backing is not relocated if the current datastore is already
+ accessible to the host and compliant with the backing's storage
+ profile.
- :param volume: volume to be relocated
+ :param volume: Volume to be relocated
:param backing: Reference to the backing
:param host: Reference to the host
"""
- # Check if volume's datastore is visible to host managing
- # the instance
- (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
+ # Check if the current datastore is visible to the host managing
+ # the instance and compliant with the storage profile.
datastore = self.volumeops.get_datastore(backing)
-
- visible_to_host = False
- for _datastore in datastores:
- if _datastore.value == datastore.value:
- visible_to_host = True
- break
- if visible_to_host:
+ backing_profile = self.volumeops.get_profile(backing)
+ if (self.volumeops.is_datastore_accessible(datastore, host) and
+ self.ds_sel.is_datastore_compliant(datastore,
+ backing_profile)):
+ LOG.debug("Datastore: %(datastore)s of backing: %(backing)s is "
+ "already accessible to instance's host: %(host)s and "
+ "compliant with storage profile: %(profile)s.",
+ {'backing': backing,
+ 'datastore': datastore,
+ 'host': host,
+ 'profile': backing_profile})
return
- # The volume's backing is on a datastore that is not visible to the
- # host managing the instance. We relocate the volume's backing.
+ # We need to relocate the backing to an accessible and profile
+ # compliant datastore.
+ req = {}
+ req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] *
+ units.Gi)
+ req[hub.DatastoreSelector.PROFILE_NAME] = backing_profile
+
+ # Select datastore satisfying the requirements.
+ best_candidate = self.ds_sel.select_datastore(req, hosts=[host])
+ if not best_candidate:
+            # No candidate datastore to relocate to; fail the operation.
+ msg = _("There are no datastores matching volume requirements;"
+ " can't relocate volume: %s.") % volume['name']
+ LOG.error(msg)
+ raise vmdk_exceptions.NoValidDatastoreException(msg)
+
+ (host, resource_pool, summary) = best_candidate
+ dc = self.volumeops.get_dc(resource_pool)
+ folder = self._get_volume_group_folder(dc)
- # Pick a folder and datastore to relocate volume backing to
- (folder, summary) = self._get_folder_ds_summary(volume,
- resource_pool,
- datastores)
- LOG.info(_LI("Relocating volume: %(backing)s to %(ds)s and %(rp)s."),
- {'backing': backing, 'ds': summary, 'rp': resource_pool})
- # Relocate the backing to the datastore and folder
self.volumeops.relocate_backing(backing, summary.datastore,
resource_pool, host)
self.volumeops.move_backing_to_folder(backing, folder)
return connected_hosts
+ def is_datastore_accessible(self, datastore, host):
+        """Check if the datastore is accessible to the given host.
+
+        :param datastore: datastore reference
+        :param host: host reference
+        :return: True if the datastore is accessible to the given host
+ """
+ hosts = self.get_connected_hosts(datastore)
+ return host.value in hosts
+
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
LOG.debug("Initiated deleting vmdk file via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path)
+
+ def get_profile(self, backing):
+ """Query storage profile associated with the given backing.
+
+ :param backing: backing reference
+        :return: profile name; None if no profile is associated with the
+                 backing
+ """
+ pbm = self._session.pbm
+ profile_manager = pbm.service_content.profileManager
+
+ object_ref = pbm.client.factory.create('ns0:PbmServerObjectRef')
+ object_ref.key = backing.value
+ object_ref.objectType = 'virtualMachine'
+
+ profile_ids = self._session.invoke_api(pbm,
+ 'PbmQueryAssociatedProfile',
+ profile_manager,
+ entity=object_ref)
+ if profile_ids:
+ profiles = self._session.invoke_api(pbm,
+ 'PbmRetrieveContent',
+ profile_manager,
+ profileIds=profile_ids)
+ return profiles[0].name