review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
VMware: Relocate volume to compliant datastore
author Vipin Balachandran <vbala@vmware.com>
Wed, 27 Aug 2014 14:56:34 +0000 (20:26 +0530)
committer Vipin Balachandran <vbala@vmware.com>
Mon, 23 Feb 2015 14:23:01 +0000 (19:53 +0530)
During attach to a nova instance, the backing VM
corresponding to the volume is relocated only if
the instance's ESX host cannot access the
backing's current datastore. The volume's storage
profile is ignored, so its virtual disk may end
up on a non-compliant datastore. This patch fixes
the problem by also checking the current
datastore's compliance with the storage profile.

Change-Id: I9f0ed6e179160a51e8c370087e9c903ed28e1728
Closes-Bug: #1301348
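
In short, the backing is now left in place only when its current
datastore is both accessible to the target host and compliant with
the backing's storage profile. A minimal sketch of the new decision,
using the names from the patch below (vops and ds_sel stand for the
driver's volumeops and datastore selector):

    datastore = vops.get_datastore(backing)
    profile = vops.get_profile(backing)
    if (vops.is_datastore_accessible(datastore, host) and
            ds_sel.is_datastore_compliant(datastore, profile)):
        return  # current placement is fine; nothing to relocate
    # otherwise select a compliant datastore and relocate the backing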

cinder/tests/test_vmware_vmdk.py
cinder/tests/test_vmware_volumeops.py
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/vmware/volumeops.py

index d64ca6b650c8e36f8e01a53c2ac021ec7ddf2bf1..2014f3e6ae2cc04fa1654a56b4ad7d7d61329c7f 100644 (file)
@@ -2575,6 +2575,86 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
             close.assert_called_once_with(fd)
         delete_if_exists.assert_called_once_with(tmp)
 
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_relocate_backing_nop(self, ds_sel, vops):
+        volume = {'name': 'vol-1', 'size': 1}
+
+        datastore = mock.sentinel.datastore
+        vops.get_datastore.return_value = datastore
+
+        profile = mock.sentinel.profile
+        vops.get_profile.return_value = profile
+
+        vops.is_datastore_accessible.return_value = True
+        ds_sel.is_datastore_compliant.return_value = True
+
+        backing = mock.sentinel.backing
+        host = mock.sentinel.host
+        self._driver._relocate_backing(volume, backing, host)
+
+        vops.is_datastore_accessible.assert_called_once_with(datastore, host)
+        ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
+                                                              profile)
+        self.assertFalse(vops.relocate_backing.called)
+
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_relocate_backing_with_no_datastore(
+            self, ds_sel, vops):
+        volume = {'name': 'vol-1', 'size': 1}
+
+        profile = mock.sentinel.profile
+        vops.get_profile.return_value = profile
+
+        vops.is_datastore_accessible.return_value = True
+        ds_sel.is_datastore_compliant.return_value = False
+
+        ds_sel.select_datastore.return_value = []
+
+        backing = mock.sentinel.backing
+        host = mock.sentinel.host
+
+        self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
+                          self._driver._relocate_backing,
+                          volume,
+                          backing,
+                          host)
+        ds_sel.select_datastore.assert_called_once_with(
+            {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
+             hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host])
+        self.assertFalse(vops.relocate_backing.called)
+
+    @mock.patch.object(VMDK_DRIVER, 'volumeops')
+    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
+    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
+    def test_relocate_backing(
+            self, ds_sel, get_volume_group_folder, vops):
+        volume = {'name': 'vol-1', 'size': 1}
+
+        vops.is_datastore_accessible.return_value = False
+        ds_sel.is_datastore_compliant.return_value = True
+
+        backing = mock.sentinel.backing
+        host = mock.sentinel.host
+
+        rp = mock.sentinel.rp
+        datastore = mock.sentinel.datastore
+        summary = mock.Mock(datastore=datastore)
+        ds_sel.select_datastore.return_value = (host, rp, summary)
+
+        folder = mock.sentinel.folder
+        get_volume_group_folder.return_value = folder
+
+        self._driver._relocate_backing(volume, backing, host)
+
+        vops.relocate_backing.assert_called_once_with(backing,
+                                                      datastore,
+                                                      rp,
+                                                      host)
+        vops.move_backing_to_folder.assert_called_once_with(backing,
+                                                            folder)
+
 
 class ImageDiskTypeTest(test.TestCase):
     """Unit tests for ImageDiskType."""
index 19b8264085384e1f14906ab6f988edfd38185d63..237e52e2124fba7df172e0875a0dacdf345eff7a 100644 (file)
@@ -238,6 +238,30 @@ class VolumeOpsTestCase(test.TestCase):
             hosts = self.vops.get_connected_hosts(datastore)
             self.assertEqual([], hosts)
 
+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+                'get_connected_hosts')
+    def test_is_datastore_accessible(self, get_connected_hosts):
+        host_1 = mock.sentinel.host_1
+        host_2 = mock.sentinel.host_2
+        get_connected_hosts.return_value = [host_1, host_2]
+
+        ds = mock.sentinel.datastore
+        host = mock.Mock(value=mock.sentinel.host_1)
+        self.assertTrue(self.vops.is_datastore_accessible(ds, host))
+        get_connected_hosts.assert_called_once_with(ds)
+
+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+                'get_connected_hosts')
+    def test_is_datastore_accessible_with_inaccessible(self,
+                                                       get_connected_hosts):
+        host_1 = mock.sentinel.host_1
+        get_connected_hosts.return_value = [host_1]
+
+        ds = mock.sentinel.datastore
+        host = mock.Mock(value=mock.sentinel.host_2)
+        self.assertFalse(self.vops.is_datastore_accessible(ds, host))
+        get_connected_hosts.assert_called_once_with(ds)
+
     def test_is_valid(self):
         with mock.patch.object(self.vops, 'get_summary') as get_summary:
             summary = mock.Mock(spec=object)
@@ -1305,6 +1329,52 @@ class VolumeOpsTestCase(test.TestCase):
                                            datacenter=dc_ref)
         self.session.wait_for_task.assert_called_once_with(task)
 
+    def test_get_profile(self):
+        server_obj = mock.Mock()
+        self.session.pbm.client.factory.create.return_value = server_obj
+
+        profile_ids = [mock.sentinel.profile_id]
+        profile_name = mock.sentinel.profile_name
+        profile = mock.Mock()
+        profile.name = profile_name
+        self.session.invoke_api.side_effect = [profile_ids, [profile]]
+
+        value = mock.sentinel.value
+        backing = mock.Mock(value=value)
+        self.assertEqual(profile_name, self.vops.get_profile(backing))
+
+        pbm = self.session.pbm
+        profile_manager = pbm.service_content.profileManager
+        exp_calls = [mock.call(pbm, 'PbmQueryAssociatedProfile',
+                               profile_manager, entity=server_obj),
+                     mock.call(pbm, 'PbmRetrieveContent', profile_manager,
+                               profileIds=profile_ids)]
+        self.assertEqual(exp_calls, self.session.invoke_api.call_args_list)
+
+        self.assertEqual(value, server_obj.key)
+        self.assertEqual('virtualMachine', server_obj.objectType)
+        self.session.invoke_api.side_effect = None
+
+    def test_get_profile_with_no_profile(self):
+        server_obj = mock.Mock()
+        self.session.pbm.client.factory.create.return_value = server_obj
+
+        self.session.invoke_api.side_effect = [[]]
+
+        value = mock.sentinel.value
+        backing = mock.Mock(value=value)
+        self.assertIsNone(self.vops.get_profile(backing))
+
+        pbm = self.session.pbm
+        profile_manager = pbm.service_content.profileManager
+        exp_calls = [mock.call(pbm, 'PbmQueryAssociatedProfile',
+                               profile_manager, entity=server_obj)]
+        self.assertEqual(exp_calls, self.session.invoke_api.call_args_list)
+
+        self.assertEqual(value, server_obj.key)
+        self.assertEqual('virtualMachine', server_obj.objectType)
+        self.session.invoke_api.side_effect = None
+
     def test_extend_virtual_disk(self):
         """Test volumeops.extend_virtual_disk."""
         task = mock.sentinel.task
index f3f3fbd48bd6ac00061655de9c5eb15ea5c637b3..e73969788c2c5b4a6ca8ce81b43ce4bfd51249be 100644 (file)
@@ -1887,38 +1887,52 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         return self.volumeops.create_folder(vm_folder, volume_folder)
 
     def _relocate_backing(self, volume, backing, host):
-        """Relocate volume backing under host and move to volume_group folder.
+        """Relocate volume backing to a datastore accessible to the given host.
 
-        If the volume backing is on a datastore that is visible to the host,
-        then need not do any operation.
+        The backing is not relocated if the current datastore is already
+        accessible to the host and compliant with the backing's storage
+        profile.
 
-        :param volume: volume to be relocated
+        :param volume: Volume to be relocated
         :param backing: Reference to the backing
         :param host: Reference to the host
         """
-        # Check if volume's datastore is visible to host managing
-        # the instance
-        (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
+        # Check if the current datastore is visible to the host managing
+        # the instance and compliant with the storage profile.
         datastore = self.volumeops.get_datastore(backing)
-
-        visible_to_host = False
-        for _datastore in datastores:
-            if _datastore.value == datastore.value:
-                visible_to_host = True
-                break
-        if visible_to_host:
+        backing_profile = self.volumeops.get_profile(backing)
+        if (self.volumeops.is_datastore_accessible(datastore, host) and
+                self.ds_sel.is_datastore_compliant(datastore,
+                                                   backing_profile)):
+            LOG.debug("Datastore: %(datastore)s of backing: %(backing)s is "
+                      "already accessible to instance's host: %(host)s and "
+                      "compliant with storage profile: %(profile)s.",
+                      {'backing': backing,
+                       'datastore': datastore,
+                       'host': host,
+                       'profile': backing_profile})
             return
 
-        # The volume's backing is on a datastore that is not visible to the
-        # host managing the instance. We relocate the volume's backing.
+        # We need to relocate the backing to an accessible and
+        # profile-compliant datastore.
+        req = {}
+        req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] *
+                                                 units.Gi)
+        req[hub.DatastoreSelector.PROFILE_NAME] = backing_profile
+
+        # Select datastore satisfying the requirements.
+        best_candidate = self.ds_sel.select_datastore(req, hosts=[host])
+        if not best_candidate:
+            # No candidate datastore to relocate to.
+            msg = _("There are no datastores matching volume requirements;"
+                    " can't relocate volume: %s.") % volume['name']
+            LOG.error(msg)
+            raise vmdk_exceptions.NoValidDatastoreException(msg)
+
+        (host, resource_pool, summary) = best_candidate
+        dc = self.volumeops.get_dc(resource_pool)
+        folder = self._get_volume_group_folder(dc)
 
-        # Pick a folder and datastore to relocate volume backing to
-        (folder, summary) = self._get_folder_ds_summary(volume,
-                                                        resource_pool,
-                                                        datastores)
-        LOG.info(_LI("Relocating volume: %(backing)s to %(ds)s and %(rp)s."),
-                 {'backing': backing, 'ds': summary, 'rp': resource_pool})
-        # Relocate the backing to the datastore and folder
         self.volumeops.relocate_backing(backing, summary.datastore,
                                         resource_pool, host)
         self.volumeops.move_backing_to_folder(backing, folder)
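
For example, for a 1 GB volume whose backing is associated with a
storage profile named "Gold" (an illustrative name), the requirements
passed to the datastore selector in the method above would be:

    req = {
        hub.DatastoreSelector.SIZE_BYTES: 1 * units.Gi,  # 1073741824 bytes
        hub.DatastoreSelector.PROFILE_NAME: 'Gold',
    }
    best_candidate = self.ds_sel.select_datastore(req, hosts=[host])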
index 024dea29482b2dd7702028c972131c436d93e2b5..780a2272f407d136b8f5d72391478a378a324680 100644 (file)
@@ -392,6 +392,15 @@ class VMwareVolumeOps(object):
 
         return connected_hosts
 
+    def is_datastore_accessible(self, datastore, host):
+        """Check if the datastore is accessible to the given host.
+
+        :param datastore: datastore reference
+        :param host: host reference
+        :return: True if the datastore is accessible to the given host
+        """
+        hosts = self.get_connected_hosts(datastore)
+        return host.value in hosts
+
     def _in_maintenance(self, summary):
         """Check if a datastore is entering maintenance or in maintenance.
 
@@ -1340,3 +1349,27 @@ class VMwareVolumeOps(object):
         LOG.debug("Initiated deleting vmdk file via task: %s.", task)
         self._session.wait_for_task(task)
         LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path)
+
+    def get_profile(self, backing):
+        """Query storage profile associated with the given backing.
+
+        :param backing: backing reference
+        :return: name of the associated storage profile, or None
+        """
+        pbm = self._session.pbm
+        profile_manager = pbm.service_content.profileManager
+
+        object_ref = pbm.client.factory.create('ns0:PbmServerObjectRef')
+        object_ref.key = backing.value
+        object_ref.objectType = 'virtualMachine'
+
+        profile_ids = self._session.invoke_api(pbm,
+                                               'PbmQueryAssociatedProfile',
+                                               profile_manager,
+                                               entity=object_ref)
+        if profile_ids:
+            profiles = self._session.invoke_api(pbm,
+                                                'PbmRetrieveContent',
+                                                profile_manager,
+                                                profileIds=profile_ids)
+            return profiles[0].name
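
get_profile issues PbmQueryAssociatedProfile to fetch the IDs of the
profiles associated with the backing VM and resolves the first ID via
PbmRetrieveContent; only the first profile's name is returned, and
when no IDs are found the method falls through and implicitly returns
None. An illustrative caller (vops and backing stand for a
VMwareVolumeOps instance and a VM managed object reference):

    profile = vops.get_profile(backing)  # profile name, or None
    if profile is None:
        # The backing VM has no associated storage profile.
        ...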