]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
VMware: Fix missing target resource pool
authorJohnson koil raj <johnson.raj@hp.com>
Sat, 31 Jan 2015 09:54:57 +0000 (15:24 +0530)
committerJohnson koil raj <johnson.raj@hp.com>
Mon, 2 Feb 2015 13:25:48 +0000 (18:55 +0530)
The volume is cloned while creating it from another volume, snapshot
or image. Currently, the target resource pool is unset while calling
the vCenter CloneVM_Task API.

So when a clone happens across clusters in a vCenter, the clone fails.
This patch fixes the problem by setting the target resource pool
(returned by the datastore selection logic) while invoking clone.

Closes-Bug: #1416456

Change-Id: Ib1023efe6fd19d604af89f7e4b4f67640d778d39

cinder/tests/test_vmware_vmdk.py
cinder/tests/test_vmware_volumeops.py
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/vmware/volumeops.py

index eac4f5d28c6656ccf95ede5493bca4e5ddb5a984..7b8e831392526e1cac8e234d275b4dd222d4c0f5 100644 (file)
@@ -1053,7 +1053,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         select_ds_for_volume.assert_called_once_with(volume)
         vops.clone_backing.assert_called_once_with(
             volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host)
+            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
         vops.delete_backing.assert_called_once_with(backing)
         self.assertFalse(extend_disk.called)
 
@@ -1577,7 +1577,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         vops.rename_backing.assert_called_once_with(backing, uuid)
         vops.clone_backing.assert_called_once_with(
             vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            datastore, vmdk.THIN_VMDK_TYPE, host)
+            datastore, vmdk.THIN_VMDK_TYPE, host, rp)
         delete_temp_backing.assert_called_once_with(backing)
         vops.change_backing_profile.assert_called_once_with(clone,
                                                             profile_id)
@@ -1789,8 +1789,8 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
 
         summary = mock.Mock()
         summary.datastore = mock.sentinel.datastore
-        select_ds.return_value = (mock.sentinel.host, mock.ANY, mock.ANY,
-                                  summary)
+        select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
+                                  mock.ANY, summary)
 
         disk_type = vmdk.THIN_VMDK_TYPE
         get_disk_type.return_value = disk_type
@@ -1807,7 +1807,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host)
+            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
         delete_temp_backing.assert_called_once_with(src)
 
         create_backing.reset_mock()
@@ -1829,7 +1829,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host)
+            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
         exp_rename_calls = [mock.call(backing, tmp_uuid),
                             mock.call(dest, volume['name'])]
         self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
@@ -2157,7 +2157,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                                                     fake_snapshot,
                                                     fake_type,
                                                     None,
-                                                    host=None)
+                                                    host=None,
+                                                    resource_pool=None)
         # If the volume size is greater than the original snapshot size,
         # _extend_vmdk_virtual_disk will be called.
         _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
@@ -2203,7 +2204,9 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                                                     fake_snapshot,
                                                     volumeops.FULL_CLONE_TYPE,
                                                     fake_datastore,
-                                                    host=fake_host)
+                                                    host=fake_host,
+                                                    resource_pool=
+                                                    fake_resource_pool)
         # If the volume size is greater than the original snapshot size,
         # _extend_vmdk_virtual_disk will be called.
         _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
index 2a5df28b1f08565215becc6297defd74441ce5a1..38a82f4aad920793243757fd2d3bff6e7d28ede9 100644 (file)
@@ -1001,7 +1001,7 @@ class VolumeOpsTestCase(test.TestCase):
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, None, None)
+                                          backing, None, None, None)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
@@ -1017,7 +1017,7 @@ class VolumeOpsTestCase(test.TestCase):
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'createNewChildDiskBacking'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, None, None)
+                                          backing, None, None, None)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
@@ -1028,14 +1028,15 @@ class VolumeOpsTestCase(test.TestCase):
         clone_type = None
         disk_type = 'thin'
         host = mock.sentinel.host
+        rp = mock.sentinel.rp
         self.session.invoke_api.reset_mock()
         ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
-                                      datastore, disk_type, host)
+                                      datastore, disk_type, host, rp)
 
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, disk_type, host)
+                                          backing, disk_type, host, rp)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
index cbffd2a53d2b10effa313f5bcd8a5d3c0501aa51..49c2c9f8c117d214b400030dd5bd268d29932537 100644 (file)
@@ -1129,7 +1129,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
 
             if disk_conversion:
                 # Clone the temporary backing for disk type conversion.
-                (host, _rp, _folder, summary) = self._select_ds_for_volume(
+                (host, rp, _folder, summary) = self._select_ds_for_volume(
                     volume)
                 datastore = summary.datastore
                 LOG.debug("Cloning temporary backing: %s for disk type "
@@ -1140,7 +1140,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                              volumeops.FULL_CLONE_TYPE,
                                              datastore,
                                              disk_type,
-                                             host)
+                                             host,
+                                             rp)
                 self._delete_temp_backing(backing)
         except Exception:
             # Delete backing and virtual disk created from image.
@@ -1501,7 +1502,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     new_backing = self.volumeops.clone_backing(
                         volume['name'], backing, None,
                         volumeops.FULL_CLONE_TYPE, datastore, new_disk_type,
-                        host)
+                        host, rp)
                     self._delete_temp_backing(backing)
                     backing = new_backing
                 except error_util.VimException:
@@ -1715,13 +1716,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         renamed = False
         try:
             # Find datastore for clone.
-            (host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
 
             disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
             dest = self.volumeops.clone_backing(dest_name, src, None,
                                                 volumeops.FULL_CLONE_TYPE,
-                                                datastore, disk_type, host)
+                                                datastore, disk_type, host, rp)
             if new_backing:
                 LOG.debug("Created new backing: %s for restoring backup.",
                           dest_name)
@@ -1987,13 +1988,14 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         """
         datastore = None
         host = None
+        rp = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
             # Pick a datastore where to create the full clone under any host
-            (host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
         clone = self.volumeops.clone_backing(volume['name'], backing,
                                              snapshot, clone_type, datastore,
-                                             host=host)
+                                             host=host, resource_pool=rp)
         # If the volume size specified by the user is greater than
         # the size of the source volume, the newly created volume will
         # allocate the capacity to the size of the source volume in the backend
index 57198cca08dc0bf441cca597b2b12ab289592af8..1fd1cc74488cc105fc27eaa3e5c84def3297c5c8 100644 (file)
@@ -1000,7 +1000,7 @@ class VMwareVolumeOps(object):
         return self._get_parent(backing, 'Folder')
 
     def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
-                        disk_type, host=None):
+                        disk_type, host=None, resource_pool=None):
         """Get the clone spec.
 
         :param datastore: Reference to datastore
@@ -1009,6 +1009,7 @@ class VMwareVolumeOps(object):
         :param backing: Source backing VM
         :param disk_type: Disk type of clone
         :param host: Target host
+        :param resource_pool: Target resource pool
         :return: Clone spec
         """
         if disk_type is not None:
@@ -1016,7 +1017,7 @@ class VMwareVolumeOps(object):
         else:
             disk_device = None
 
-        relocate_spec = self._get_relocate_spec(datastore, None, host,
+        relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
                                                 disk_move_type, disk_type,
                                                 disk_device)
         cf = self._session.vim.client.factory
@@ -1030,7 +1031,7 @@ class VMwareVolumeOps(object):
         return clone_spec
 
     def clone_backing(self, name, backing, snapshot, clone_type, datastore,
-                      disk_type=None, host=None):
+                      disk_type=None, host=None, resource_pool=None):
         """Clone backing.
 
         If the clone_type is 'full', then a full clone of the source volume
@@ -1044,21 +1045,23 @@ class VMwareVolumeOps(object):
         :param datastore: Reference to the datastore entity
         :param disk_type: Disk type of the clone
         :param host: Target host
+        :param resource_pool: Target resource pool
         """
         LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
                   "clone type: %(type)s from snapshot: %(snap)s on "
-                  "host: %(host)s, datastore: %(ds)s with disk type: "
-                  "%(disk_type)s.",
+                  "resource pool: %(resource_pool)s, host: %(host)s, "
+                  "datastore: %(ds)s with disk type: %(disk_type)s.",
                   {'back': backing, 'name': name, 'type': clone_type,
                    'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
-                   'host': host})
+                   'host': host, 'resource_pool': resource_pool})
         folder = self._get_folder(backing)
         if clone_type == LINKED_CLONE_TYPE:
             disk_move_type = 'createNewChildDiskBacking'
         else:
             disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot,
-                                          backing, disk_type, host)
+                                          backing, disk_type, host,
+                                          resource_pool)
         task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
                                         backing, folder=folder, name=name,
                                         spec=clone_spec)