From: Vipin Balachandran
Date: Tue, 15 Dec 2015 13:40:37 +0000 (-0800)
Subject: VMware: Fix volume copy across vCenter datacenters
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=3509b5374d0006a3f77c1b59d0aa3d70a1914e3b;p=openstack-build%2Fcinder-build.git

VMware: Fix volume copy across vCenter datacenters

Currently volume creation from another volume or snapshot fails if the
source volume/snapshot and the destination volume are in different
vCenter datacenters. This is because we are always passing the source
datacenter folder as the location of the destination volume while
calling the vCenter clone API. This patch fixes the problem by passing
the correct datacenter folder while copying a volume or snapshot.

Change-Id: I3bfbc149fc75fbff303b9c9b1c833b499fa2814e
Closes-bug: #1521991
---
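
Note (not part of the patch): the sketch below only illustrates the folder
selection this change introduces in clone_backing(): an explicitly passed
destination folder is used as-is, and the source backing's folder is only a
fallback. The helper and variable names are simplified stand-ins for
illustration, not the driver's real VMwareVolumeOps methods.

def _source_folder(backing):
    # Stand-in for looking up the source backing's parent folder
    # (done through the vSphere API in the real driver).
    return backing['parent_folder']

def choose_clone_folder(backing, folder=None):
    # With this fix an explicit destination folder, derived from the
    # destination volume's datacenter, takes precedence; falling back to
    # the source folder preserves the old single-datacenter behaviour.
    if folder is not None:
        return folder
    return _source_folder(backing)

# Cloning across datacenters now lands in the destination folder:
src = {'parent_folder': 'dc1-volumes'}
assert choose_clone_folder(src) == 'dc1-volumes'
assert choose_clone_folder(src, folder='dc2-volumes') == 'dc2-volumes'
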
diff --git a/cinder/tests/unit/test_vmware_vmdk.py b/cinder/tests/unit/test_vmware_vmdk.py
index e867fdb61..55cf5c5d7 100644
--- a/cinder/tests/unit/test_vmware_vmdk.py
+++ b/cinder/tests/unit/test_vmware_vmdk.py
@@ -492,7 +492,8 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             select_ds_for_volume.assert_called_once_with(volume)
             vops.clone_backing.assert_called_once_with(
                 volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-                datastore, disk_type, host, rp)
+                datastore, disk_type=disk_type, host=host, resource_pool=rp,
+                folder=folder)
             delete_tmp_backing.assert_called_once_with(backing)
             vops.update_backing_disk_uuid(clone, volume['id'])
         else:
@@ -1000,7 +1001,8 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
         vops.rename_backing.assert_called_once_with(backing, uuid)
         vops.clone_backing.assert_called_once_with(
             vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
-            datastore, vmdk.THIN_VMDK_TYPE, host, rp)
+            datastore, disk_type=vmdk.THIN_VMDK_TYPE, host=host,
+            resource_pool=rp, folder=folder)
         vops.update_backing_disk_uuid.assert_called_once_with(clone, vol['id'])
         delete_temp_backing.assert_called_once_with(backing)
         vops.change_backing_profile.assert_called_once_with(clone,
@@ -1192,7 +1194,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
         summary = mock.Mock()
         summary.datastore = mock.sentinel.datastore
         select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
-                                  mock.ANY, summary)
+                                  mock.sentinel.folder, summary)

         disk_type = vmdk.THIN_VMDK_TYPE
         get_disk_type.return_value = disk_type
@@ -1213,7 +1215,8 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
+            summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
+            resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
         vops.update_backing_disk_uuid.assert_called_once_with(dest,
                                                               volume['id'])
         delete_temp_backing.assert_called_once_with(src)
@@ -1235,7 +1238,8 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
             context, src_uuid, volume, tmp_file_path, backup_size)
         vops.clone_backing.assert_called_once_with(
             dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
-            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
+            summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
+            resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
         vops.update_backing_disk_uuid.assert_called_once_with(dest,
                                                               volume['id'])
         exp_rename_calls = [mock.call(backing, tmp_uuid),
@@ -1695,7 +1699,8 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
                                                     None,
                                                     host=None,
                                                     resource_pool=None,
-                                                    extra_config=extra_config)
+                                                    extra_config=extra_config,
+                                                    folder=None)
         volume_ops.update_backing_disk_uuid.assert_called_once_with(
             clone, fake_volume['id'])

@@ -1745,15 +1750,16 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
         _select_ds_for_volume.assert_called_with(fake_volume)

         extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']}
-        volume_ops.clone_backing.assert_called_with(fake_volume['name'],
-                                                    fake_backing,
-                                                    fake_snapshot,
-                                                    volumeops.FULL_CLONE_TYPE,
-                                                    fake_datastore,
-                                                    host=fake_host,
-                                                    resource_pool=
-                                                    fake_resource_pool,
-                                                    extra_config=extra_config)
+        volume_ops.clone_backing.assert_called_with(
+            fake_volume['name'],
+            fake_backing,
+            fake_snapshot,
+            volumeops.FULL_CLONE_TYPE,
+            fake_datastore,
+            host=fake_host,
+            resource_pool=fake_resource_pool,
+            extra_config=extra_config,
+            folder=fake_folder)
         volume_ops.update_backing_disk_uuid.assert_called_once_with(
             clone, fake_volume['id'])

diff --git a/cinder/tests/unit/test_vmware_volumeops.py b/cinder/tests/unit/test_vmware_volumeops.py
index d1b98c823..fe45fb7d3 100644
--- a/cinder/tests/unit/test_vmware_volumeops.py
+++ b/cinder/tests/unit/test_vmware_volumeops.py
@@ -17,6 +17,7 @@
 Test suite for VMware VMDK driver volumeops module.
 """

+import ddt
 import mock
 from oslo_utils import units
 from oslo_vmware import exceptions
@@ -27,6 +28,7 @@ from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
 from cinder.volume.drivers.vmware import volumeops


+@ddt.ddt
 class VolumeOpsTestCase(test.TestCase):
     """Unit tests for volumeops module."""

@@ -1221,82 +1223,64 @@ class VolumeOpsTestCase(test.TestCase):
                                           disk_device)
         self._verify_extra_config(ret.config.extraConfig, key, value)

+    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+                '_get_folder')
     @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                 '_get_clone_spec')
-    def test_clone_backing(self, get_clone_spec):
-        folder = mock.Mock(name='folder', spec=object)
-        folder._type = 'Folder'
-        task = mock.sentinel.task
-        self.session.invoke_api.side_effect = [folder, task, folder, task,
-                                               folder, task]
-        task_info = mock.Mock(spec=object)
-        task_info.result = mock.sentinel.new_backing
-        self.session.wait_for_task.return_value = task_info
+    def _test_clone_backing(
+            self, clone_type, folder, get_clone_spec, get_folder):
+        backing_folder = mock.sentinel.backing_folder
+        get_folder.return_value = backing_folder
+
         clone_spec = mock.sentinel.clone_spec
         get_clone_spec.return_value = clone_spec
-        # Test non-linked clone_backing
+
+        task = mock.sentinel.task
+        self.session.invoke_api.return_value = task
+
+        clone = mock.sentinel.clone
+        self.session.wait_for_task.return_value = mock.Mock(result=clone)
+
         name = mock.sentinel.name
-        backing = mock.Mock(spec=object)
-        backing._type = 'VirtualMachine'
+        backing = mock.sentinel.backing
         snapshot = mock.sentinel.snapshot
-        clone_type = "anything-other-than-linked"
-        datastore = mock.sentinel.datstore
-        ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
-                                      datastore)
-        # verify calls
-        self.assertEqual(mock.sentinel.new_backing, ret)
-        disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
-        get_clone_spec.assert_called_with(
-            datastore, disk_move_type, snapshot, backing, None, host=None,
-            resource_pool=None, extra_config=None)
-        expected = [mock.call(vim_util, 'get_object_property',
-                              self.session.vim, backing, 'parent'),
-                    mock.call(self.session.vim, 'CloneVM_Task', backing,
-                              folder=folder, name=name, spec=clone_spec)]
-        self.assertEqual(expected, self.session.invoke_api.mock_calls)
-
-        # Test linked clone_backing
-        clone_type = volumeops.LINKED_CLONE_TYPE
-        self.session.invoke_api.reset_mock()
-        ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
-                                      datastore)
-        # verify calls
-        self.assertEqual(mock.sentinel.new_backing, ret)
-        disk_move_type = 'createNewChildDiskBacking'
-        get_clone_spec.assert_called_with(
-            datastore, disk_move_type, snapshot, backing, None, host=None,
-            resource_pool=None, extra_config=None)
-        expected = [mock.call(vim_util, 'get_object_property',
-                              self.session.vim, backing, 'parent'),
-                    mock.call(self.session.vim, 'CloneVM_Task', backing,
-                              folder=folder, name=name, spec=clone_spec)]
-        self.assertEqual(expected, self.session.invoke_api.mock_calls)
-
-        # Test with optional params (disk_type, host, resource_pool and
-        # extra_config).
-        clone_type = None
-        disk_type = 'thin'
+        datastore = mock.sentinel.datastore
+        disk_type = mock.sentinel.disk_type
         host = mock.sentinel.host
-        rp = mock.sentinel.rp
+        resource_pool = mock.sentinel.resource_pool
         extra_config = mock.sentinel.extra_config
-        self.session.invoke_api.reset_mock()
-        ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
-                                      datastore, disk_type, host, rp,
-                                      extra_config)
-
-        self.assertEqual(mock.sentinel.new_backing, ret)
-        disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
-        get_clone_spec.assert_called_with(
-            datastore, disk_move_type, snapshot, backing, disk_type, host=host,
-            resource_pool=rp, extra_config=extra_config)
-        expected = [mock.call(vim_util, 'get_object_property',
-                              self.session.vim, backing, 'parent'),
-                    mock.call(self.session.vim, 'CloneVM_Task', backing,
-                              folder=folder, name=name, spec=clone_spec)]
-        self.assertEqual(expected, self.session.invoke_api.mock_calls)
+        ret = self.vops.clone_backing(
+            name, backing, snapshot, clone_type, datastore,
+            disk_type=disk_type, host=host, resource_pool=resource_pool,
+            extra_config=extra_config, folder=folder)

-        # Clear side effects.
-        self.session.invoke_api.side_effect = None
+        if folder:
+            self.assertFalse(get_folder.called)
+        else:
+            get_folder.assert_called_once_with(backing)
+
+        if clone_type == 'linked':
+            exp_disk_move_type = 'createNewChildDiskBacking'
+        else:
+            exp_disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
+        get_clone_spec.assert_called_once_with(
+            datastore, exp_disk_move_type, snapshot, backing, disk_type,
+            host=host, resource_pool=resource_pool, extra_config=extra_config)
+
+        exp_folder = folder if folder else backing_folder
+        self.session.invoke_api.assert_called_once_with(
+            self.session.vim, 'CloneVM_Task', backing, folder=exp_folder,
+            name=name, spec=clone_spec)
+
+        self.session.wait_for_task.assert_called_once_with(task)
+        self.assertEqual(clone, ret)
+
+    @ddt.data('linked', 'full')
+    def test_clone_backing(self, clone_type):
+        self._test_clone_backing(clone_type, mock.sentinel.folder)
+
+    def test_clone_backing_with_empty_folder(self):
+        self._test_clone_backing('linked', None)

     @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                 '_create_specs_for_disk_add')
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index d01b87ebc..164c45254 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -936,7 +936,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):

         if disk_conversion:
             # Clone the temporary backing for disk type conversion.
-            (host, rp, _folder, summary) = self._select_ds_for_volume(
+            (host, rp, folder, summary) = self._select_ds_for_volume(
                 volume)
             datastore = summary.datastore
             LOG.debug("Cloning temporary backing: %s for disk type "
@@ -946,9 +946,10 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
                                                  None,
                                                  volumeops.FULL_CLONE_TYPE,
                                                  datastore,
-                                                 disk_type,
-                                                 host,
-                                                 rp)
+                                                 disk_type=disk_type,
+                                                 host=host,
+                                                 resource_pool=rp,
+                                                 folder=folder)
             self._delete_temp_backing(backing)
             backing = clone
             self.volumeops.update_backing_disk_uuid(backing, volume['id'])
@@ -1285,6 +1286,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
             return False

         (host, rp, summary) = best_candidate
+        dc = self.volumeops.get_dc(rp)
+        folder = self._get_volume_group_folder(dc, volume['project_id'])
         new_datastore = summary.datastore
         if datastore.value != new_datastore.value:
             # Datastore changed; relocate the backing.
@@ -1292,10 +1295,6 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
                       backing)
             self.volumeops.relocate_backing(
                 backing, new_datastore, rp, host, new_disk_type)
-
-            dc = self.volumeops.get_dc(rp)
-            folder = self._get_volume_group_folder(dc,
-                                                   volume['project_id'])
             self.volumeops.move_backing_to_folder(backing, folder)
         elif need_disk_type_conversion:
             # Same datastore, but clone is needed for disk type conversion.
@@ -1311,8 +1310,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):

                 new_backing = self.volumeops.clone_backing(
                     volume['name'], backing, None,
-                    volumeops.FULL_CLONE_TYPE, datastore, new_disk_type,
-                    host, rp)
+                    volumeops.FULL_CLONE_TYPE, datastore,
+                    disk_type=new_disk_type, host=host,
+                    resource_pool=rp, folder=folder)
                 self.volumeops.update_backing_disk_uuid(new_backing,
                                                         volume['id'])
                 self._delete_temp_backing(backing)
@@ -1552,13 +1552,15 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
         renamed = False
         try:
             # Find datastore for clone.
-            (host, rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore

             disk_type = VMwareVcVmdkDriver._get_disk_type(volume)
             dest = self.volumeops.clone_backing(dest_name, src, None,
                                                 volumeops.FULL_CLONE_TYPE,
-                                                datastore, disk_type, host, rp)
+                                                datastore, disk_type=disk_type,
+                                                host=host, resource_pool=rp,
+                                                folder=folder)
             self.volumeops.update_backing_disk_uuid(dest, volume['id'])
             if new_backing:
                 LOG.debug("Created new backing: %s for restoring backup.",
@@ -1842,15 +1844,17 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
         datastore = None
         host = None
         rp = None
+        folder = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
             # Pick a datastore where to create the full clone under any host
-            (host, rp, _folder, summary) = self._select_ds_for_volume(volume)
+            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
         extra_config = self._get_extra_config(volume)
         clone = self.volumeops.clone_backing(volume['name'], backing, snapshot,
                                              clone_type, datastore, host=host,
                                              resource_pool=rp,
-                                             extra_config=extra_config)
+                                             extra_config=extra_config,
+                                             folder=folder)
         self.volumeops.update_backing_disk_uuid(clone, volume['id'])
         # If the volume size specified by the user is greater than
         # the size of the source volume, the newly created volume will
diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py
index c238e32c6..4c24d8f19 100644
--- a/cinder/volume/drivers/vmware/volumeops.py
+++ b/cinder/volume/drivers/vmware/volumeops.py
@@ -1145,7 +1145,7 @@ class VMwareVolumeOps(object):

     def clone_backing(self, name, backing, snapshot, clone_type, datastore,
                       disk_type=None, host=None, resource_pool=None,
-                      extra_config=None):
+                      extra_config=None, folder=None):
         """Clone backing.

         If the clone_type is 'full', then a full clone of the source volume
@@ -1162,6 +1162,7 @@ class VMwareVolumeOps(object):
         :param resource_pool: Target resource pool
         :param extra_config: Key-value pairs to be written to backing's
                              extra-config
+        :param folder: The location of the clone
         """
         LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
                   "clone type: %(type)s from snapshot: %(snap)s on "
@@ -1170,7 +1171,11 @@ class VMwareVolumeOps(object):
                   {'back': backing, 'name': name, 'type': clone_type,
                    'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
                    'host': host, 'resource_pool': resource_pool})
-        folder = self._get_folder(backing)
+
+        if folder is None:
+            # Use source folder as the location of the clone.
+            folder = self._get_folder(backing)
+
         if clone_type == LINKED_CLONE_TYPE:
             disk_move_type = 'createNewChildDiskBacking'
         else: