From: Vipin Balachandran
Date: Wed, 19 Nov 2014 10:38:45 +0000 (+0530)
Subject: VMware: Set target ESX host for backing VM clone
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=9e590a6c854cec6df18630954734e4a4e0249a91;p=openstack-build%2Fcinder-build.git

VMware: Set target ESX host for backing VM clone

The backing VM corresponding to a volume is cloned during create
volume from another volume, snapshot, or image. The backing VM is
also cloned during retype and backup restore. Currently, the target
ESX host is unset while calling the vCenter CloneVM_Task API, and
hence the source ESX host is used as the target. If the destination
datastore returned by the datastore selection logic is not accessible
to the source host, the clone fails. This patch fixes the problem by
setting the target ESX host (returned by the datastore selection
logic) while invoking clone.

Closes-Bug: #1380602
Change-Id: I030d5ce6378fb70f7f98356114825abc12297687
---

diff --git a/cinder/tests/test_vmware_vmdk.py b/cinder/tests/test_vmware_vmdk.py
index 418461800..eea42dc85 100644
--- a/cinder/tests/test_vmware_vmdk.py
+++ b/cinder/tests/test_vmware_vmdk.py
@@ -1051,7 +1051,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         select_ds_for_volume.assert_called_once_with(volume)
         vops.clone_backing.assert_called_once_with(
             volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type)
+            summary.datastore, disk_type, mock.sentinel.host)
         vops.delete_backing.assert_called_once_with(backing)
         self.assertFalse(extend_disk.called)

@@ -1554,7 +1554,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         vops.rename_backing.assert_called_once_with(backing, uuid)
         vops.clone_backing.assert_called_once_with(
             vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            datastore, vmdk.THIN_VMDK_TYPE)
+            datastore, vmdk.THIN_VMDK_TYPE, host)
         delete_temp_backing.assert_called_once_with(backing)
         vops.change_backing_profile.assert_called_once_with(clone,
                                                             profile_id)
@@ -1766,7 +1766,8 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         summary = mock.Mock()
         summary.datastore = mock.sentinel.datastore
-        select_ds.return_value = (mock.ANY, mock.ANY, mock.ANY, summary)
+        select_ds.return_value = (mock.sentinel.host, mock.ANY, mock.ANY,
+                                  summary)

         disk_type = vmdk.THIN_VMDK_TYPE
         get_disk_type.return_value = disk_type
@@ -1783,7 +1784,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type)
+            summary.datastore, disk_type, mock.sentinel.host)
         delete_temp_backing.assert_called_once_with(src)

         create_backing.reset_mock()
@@ -1805,7 +1806,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type)
+            summary.datastore, disk_type, mock.sentinel.host)
         exp_rename_calls = [mock.call(backing, tmp_uuid),
                             mock.call(dest, volume['name'])]
         self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
@@ -2132,7 +2133,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                                                    fake_backing,
                                                    fake_snapshot,
                                                    fake_type,
-                                                   None)
+                                                   None,
+                                                   host=None)
         # If the volume size is greater than the original snapshot size,
         # _extend_vmdk_virtual_disk will be called.
         _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
@@ -2177,7 +2179,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
                                                    fake_backing,
                                                    fake_snapshot,
                                                    volumeops.FULL_CLONE_TYPE,
-                                                   fake_datastore)
+                                                   fake_datastore,
+                                                   host=fake_host)
         # If the volume size is greater than the original snapshot size,
         # _extend_vmdk_virtual_disk will be called.
         _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
diff --git a/cinder/tests/test_vmware_volumeops.py b/cinder/tests/test_vmware_volumeops.py
index 8527217cd..9644413a7 100644
--- a/cinder/tests/test_vmware_volumeops.py
+++ b/cinder/tests/test_vmware_volumeops.py
@@ -983,7 +983,7 @@ class VolumeOpsTestCase(test.TestCase):
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, None)
+                                          backing, None, None)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
@@ -999,24 +999,25 @@ class VolumeOpsTestCase(test.TestCase):
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'createNewChildDiskBacking'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, None)
+                                          backing, None, None)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
                               folder=folder, name=name, spec=clone_spec)]
         self.assertEqual(expected, self.session.invoke_api.mock_calls)

-        # Test disk type conversion.
+        # Test disk type conversion and target host.
         clone_type = None
         disk_type = 'thin'
+        host = mock.sentinel.host
         self.session.invoke_api.reset_mock()
         ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
-                                      datastore, disk_type)
+                                      datastore, disk_type, host)
         self.assertEqual(mock.sentinel.new_backing, ret)
         disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
-                                          backing, disk_type)
+                                          backing, disk_type, host)
         expected = [mock.call(vim_util, 'get_object_property',
                               self.session.vim, backing, 'parent'),
                     mock.call(self.session.vim, 'CloneVM_Task', backing,
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index 65eb11eb0..ac10288af 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -1137,7 +1137,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                                      None,
                                                      volumeops.FULL_CLONE_TYPE,
                                                      datastore,
-                                                     disk_type)
+                                                     disk_type,
+                                                     host)
                 self._delete_temp_backing(backing)
             except Exception:
                 # Delete backing and virtual disk created from image.
@@ -1500,7 +1501,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     new_backing = self.volumeops.clone_backing(
                         volume['name'], backing, None,
-                        volumeops.FULL_CLONE_TYPE, datastore, new_disk_type)
+                        volumeops.FULL_CLONE_TYPE, datastore, new_disk_type,
+                        host)
                     self._delete_temp_backing(backing)
                     backing = new_backing
                 except error_util.VimException:
@@ -1708,13 +1710,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         renamed = False
         try:
             # Find datastore for clone.
-            (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
             disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
             dest = self.volumeops.clone_backing(dest_name, src, None,
                                                 volumeops.FULL_CLONE_TYPE,
-                                                datastore, disk_type)
+                                                datastore, disk_type, host)
             if new_backing:
                 LOG.debug("Created new backing: %s for restoring backup.",
                           dest_name)
@@ -1979,12 +1981,14 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         :param src_vsize: the size of the source volume
         """
         datastore = None
+        host = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
             # Pick a datastore where to create the full clone under any host
-            (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
         clone = self.volumeops.clone_backing(volume['name'], backing,
-                                             snapshot, clone_type, datastore)
+                                             snapshot, clone_type, datastore,
+                                             host=host)
         # If the volume size specified by the user is greater than
         # the size of the source volume, the newly created volume will
         # allocate the capacity to the size of the source volume in the backend
diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py
index 92bdf422e..58b81285e 100644
--- a/cinder/volume/drivers/vmware/volumeops.py
+++ b/cinder/volume/drivers/vmware/volumeops.py
@@ -998,7 +998,7 @@ class VMwareVolumeOps(object):
         return self._get_parent(backing, 'Folder')

     def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
-                        disk_type):
+                        disk_type, host=None):
         """Get the clone spec.

         :param datastore: Reference to datastore
@@ -1006,6 +1006,7 @@ class VMwareVolumeOps(object):
         :param snapshot: Reference to snapshot
         :param backing: Source backing VM
         :param disk_type: Disk type of clone
+        :param host: Target host
         :return: Clone spec
         """
         if disk_type is not None:
@@ -1013,7 +1014,7 @@ class VMwareVolumeOps(object):
         else:
             disk_device = None

-        relocate_spec = self._get_relocate_spec(datastore, None, None,
+        relocate_spec = self._get_relocate_spec(datastore, None, host,
                                                 disk_move_type, disk_type,
                                                 disk_device)
         cf = self._session.vim.client.factory
@@ -1027,7 +1028,7 @@ class VMwareVolumeOps(object):
         return clone_spec

     def clone_backing(self, name, backing, snapshot, clone_type, datastore,
-                      disk_type=None):
+                      disk_type=None, host=None):
         """Clone backing.

         If the clone_type is 'full', then a full clone of the source volume
@@ -1040,19 +1041,22 @@ class VMwareVolumeOps(object):
         :param clone_type: Whether a full clone or linked clone is to be made
         :param datastore: Reference to the datastore entity
         :param disk_type: Disk type of the clone
+        :param host: Target host
         """
         LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
                   "clone type: %(type)s from snapshot: %(snap)s on "
-                  "datastore: %(ds)s with disk type: %(disk_type)s.",
+                  "host: %(host)s, datastore: %(ds)s with disk type: "
+                  "%(disk_type)s.",
                   {'back': backing, 'name': name, 'type': clone_type,
-                   'snap': snapshot, 'ds': datastore, 'disk_type': disk_type})
+                   'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
+                   'host': host})
         folder = self._get_folder(backing)
         if clone_type == LINKED_CLONE_TYPE:
             disk_move_type = 'createNewChildDiskBacking'
         else:
             disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
         clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot,
-                                          backing, disk_type)
+                                          backing, disk_type, host)
         task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
                                         backing, folder=folder, name=name,
                                         spec=clone_spec)
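
For readers who want the gist without walking the diff, here is a minimal,
self-contained Python sketch of the idea. It uses plain dicts and hypothetical
helper names (select_ds_for_volume, get_relocate_spec, clone_backing) in place
of the driver's real vSphere spec objects and oslo.vmware session calls; it
illustrates the change, it is not the actual driver code.

    # Hypothetical, simplified stand-ins for the driver's clone flow. Plain
    # dicts replace the real vSphere spec objects built via the suds factory.

    def select_ds_for_volume():
        """Stand-in for _select_ds_for_volume(): (host, rp, folder, summary)."""
        summary = {'datastore': 'datastore-7'}
        return 'host-42', 'resgroup-1', 'group-v2', summary


    def get_relocate_spec(datastore, host=None):
        """Build a simplified relocate spec; the host is set only when given."""
        spec = {'datastore': datastore}
        if host is not None:
            # With this patch, the ESX host picked together with the datastore
            # is carried into the relocate spec, so the clone no longer
            # defaults to the source VM's host (which may not see the
            # selected datastore).
            spec['host'] = host
        return spec


    def clone_backing(name, datastore, disk_type=None, host=None):
        """Return the spec that would accompany a CloneVM_Task invocation."""
        return {'name': name,
                'location': get_relocate_spec(datastore, host=host),
                'diskType': disk_type}


    if __name__ == '__main__':
        # Before the patch the first tuple element was discarded (named _host);
        # now it is threaded through to the clone call.
        host, _rp, _folder, summary = select_ds_for_volume()
        print(clone_backing('volume-1', summary['datastore'],
                            disk_type='thin', host=host))

The point is simply that the host chosen by the datastore selection logic is
now carried into the relocate spec instead of being dropped, so the clone
lands on a host that can actually access the destination datastore.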