]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
VMware: Fix missing target resource pool
authorJohnson koil raj <johnson.raj@hp.com>
Sat, 31 Jan 2015 09:54:57 +0000 (15:24 +0530)
committerVipin Balachandran <vbala@vmware.com>
Fri, 29 May 2015 06:02:34 +0000 (11:32 +0530)
The volume is cloned while creating it from another volume, snapshot
or image. Currently, the target resource pool is unset while calling
the vCenter CloneVM_Task API.

As a result, when a clone happens across clusters in a vCenter, the
clone fails. This patch fixes the problem by setting the target
resource pool (returned by the datastore selection logic) while
invoking clone.

Closes-Bug: #1416456

Change-Id: Ib1023efe6fd19d604af89f7e4b4f67640d778d39
(cherry picked from commit 0df929e60c9053240b792dea7ed7e2c10de69fc1)

cinder/tests/test_vmware_vmdk.py
cinder/tests/test_vmware_volumeops.py
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/vmware/volumeops.py

index 9da3376d5a67227275fbdc2c8832e8c5587887d8..cf76beb2ad88dab5fe01827897783ef548087649 100644 (file)
@@ -864,7 +864,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         select_ds_for_volume.assert_called_once_with(volume)
         vops.clone_backing.assert_called_once_with(
             volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host)
+            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
         vops.delete_backing.assert_called_once_with(backing)
         self.assertFalse(extend_disk.called)
 
@@ -1484,7 +1484,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         vops.rename_backing.assert_called_once_with(backing, uuid)
         vops.clone_backing.assert_called_once_with(
             vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            datastore, vmdk.THIN_VMDK_TYPE, host)
+            datastore, vmdk.THIN_VMDK_TYPE, host, rp)
         delete_temp_backing.assert_called_once_with(backing)
         vops.change_backing_profile.assert_called_once_with(clone,
                                                             profile_id)
@@ -1696,8 +1696,8 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
 
         summary = mock.Mock()
         summary.datastore = mock.sentinel.datastore
-        select_ds.return_value = (mock.sentinel.host, mock.ANY, mock.ANY,
-                                  summary)
+        select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
+                                  mock.ANY, summary)
 
         disk_type = vmdk.THIN_VMDK_TYPE
         get_disk_type.return_value = disk_type
@@ -1714,7 +1714,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host)
+            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
         delete_temp_backing.assert_called_once_with(src)
 
         create_backing.reset_mock()
@@ -1736,7 +1736,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host)
+            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
         exp_rename_calls = [mock.call(backing, tmp_uuid),
                             mock.call(dest, volume['name'])]
         self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
@@ -2122,7 +2122,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                                                     fake_snapshot,
                                                     fake_type,
                                                     None,
-                                                    host=None)
+                                                    host=None,
+                                                    resource_pool=None)
         # If the volume size is greater than the original snapshot size,
         # _extend_vmdk_virtual_disk will be called.
         _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
@@ -2168,7 +2169,9 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                                                     fake_snapshot,
                                                     volumeops.FULL_CLONE_TYPE,
                                                     fake_datastore,
-                                                    host=fake_host)
+                                                    host=fake_host,
+                                                    resource_pool=
+                                                    fake_resource_pool)
         # If the volume size is greater than the original snapshot size,
         # _extend_vmdk_virtual_disk will be called.
         _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
index 50a1f0cbd9a711104241c3a9b0797c5fb0ff70df..f4b60ce610bbe1b650a35d4921844522b5f3d45d 100644 (file)
@@ -1026,7 +1026,7 @@ class VolumeOpsTestCase(test.TestCase):
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, None, None)
+                                          backing, None, None, None)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
@@ -1042,7 +1042,7 @@ class VolumeOpsTestCase(test.TestCase):
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'createNewChildDiskBacking'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, None, None)
+                                          backing, None, None, None)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
@@ -1053,14 +1053,15 @@ class VolumeOpsTestCase(test.TestCase):
         clone_type = None
         disk_type = 'thin'
         host = mock.sentinel.host
+        rp = mock.sentinel.rp
         self.session.invoke_api.reset_mock()
         ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
-                                      datastore, disk_type, host)
+                                      datastore, disk_type, host, rp)
 
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, disk_type, host)
+                                          backing, disk_type, host, rp)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
index 338765f84b1d00317b818757ffae5b853556bd30..c8c2eab3f71b10661f55d22087855ed8bee4259c 100644 (file)
@@ -1144,7 +1144,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
 
             if disk_conversion:
                 # Clone the temporary backing for disk type conversion.
-                (host, _rp, _folder, summary) = self._select_ds_for_volume(
+                (host, rp, _folder, summary) = self._select_ds_for_volume(
                     volume)
                 datastore = summary.datastore
                 LOG.debug("Cloning temporary backing: %s for disk type "
@@ -1155,7 +1155,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                              volumeops.FULL_CLONE_TYPE,
                                              datastore,
                                              disk_type,
-                                             host)
+                                             host,
+                                             rp)
                 self._delete_temp_backing(backing)
         except Exception:
             # Delete backing and virtual disk created from image.
@@ -1535,7 +1536,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     new_backing = self.volumeops.clone_backing(
                         volume['name'], backing, None,
                         volumeops.FULL_CLONE_TYPE, datastore, new_disk_type,
-                        host)
+                        host, rp)
                     self._delete_temp_backing(backing)
                     backing = new_backing
                 except exceptions.VimException:
@@ -1762,13 +1763,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         renamed = False
         try:
             # Find datastore for clone.
-            (host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
 
             disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
             dest = self.volumeops.clone_backing(dest_name, src, None,
                                                 volumeops.FULL_CLONE_TYPE,
-                                                datastore, disk_type, host)
+                                                datastore, disk_type, host, rp)
             if new_backing:
                 LOG.debug("Created new backing: %s for restoring backup.",
                           dest_name)
@@ -2031,13 +2032,14 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         """
         datastore = None
         host = None
+        rp = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
             # Pick a datastore where to create the full clone under any host
-            (host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
         clone = self.volumeops.clone_backing(volume['name'], backing,
                                              snapshot, clone_type, datastore,
-                                             host=host)
+                                             host=host, resource_pool=rp)
         # If the volume size specified by the user is greater than
         # the size of the source volume, the newly created volume will
         # allocate the capacity to the size of the source volume in the backend
index e401f7126273a299e99c3a0e5c8e4b1364080f00..a826133abe7b92b79fd327e63ec1c51b59267214 100644 (file)
@@ -1012,7 +1012,7 @@ class VMwareVolumeOps(object):
         return self._get_parent(backing, 'Folder')
 
     def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
-                        disk_type, host=None):
+                        disk_type, host=None, resource_pool=None):
         """Get the clone spec.
 
         :param datastore: Reference to datastore
@@ -1021,6 +1021,7 @@ class VMwareVolumeOps(object):
         :param backing: Source backing VM
         :param disk_type: Disk type of clone
         :param host: Target host
+        :param resource_pool: Target resource pool
         :return: Clone spec
         """
         if disk_type is not None:
@@ -1028,7 +1029,7 @@ class VMwareVolumeOps(object):
         else:
             disk_device = None
 
-        relocate_spec = self._get_relocate_spec(datastore, None, host,
+        relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
                                                 disk_move_type, disk_type,
                                                 disk_device)
         cf = self._session.vim.client.factory
@@ -1042,7 +1043,7 @@ class VMwareVolumeOps(object):
         return clone_spec
 
     def clone_backing(self, name, backing, snapshot, clone_type, datastore,
-                      disk_type=None, host=None):
+                      disk_type=None, host=None, resource_pool=None):
         """Clone backing.
 
         If the clone_type is 'full', then a full clone of the source volume
@@ -1056,21 +1057,23 @@ class VMwareVolumeOps(object):
         :param datastore: Reference to the datastore entity
         :param disk_type: Disk type of the clone
         :param host: Target host
+        :param resource_pool: Target resource pool
         """
         LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
                   "clone type: %(type)s from snapshot: %(snap)s on "
-                  "host: %(host)s, datastore: %(ds)s with disk type: "
-                  "%(disk_type)s.",
+                  "resource pool: %(resource_pool)s, host: %(host)s, "
+                  "datastore: %(ds)s with disk type: %(disk_type)s.",
                   {'back': backing, 'name': name, 'type': clone_type,
                    'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
-                   'host': host})
+                   'host': host, 'resource_pool': resource_pool})
         folder = self._get_folder(backing)
         if clone_type == LINKED_CLONE_TYPE:
             disk_move_type = 'createNewChildDiskBacking'
         else:
             disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot,
-                                          backing, disk_type, host)
+                                          backing, disk_type, host,
+                                          resource_pool)
         task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
                                         backing, folder=folder, name=name,
                                         spec=clone_spec)