From: Vipin Balachandran Date: Wed, 16 Jul 2014 15:14:09 +0000 (+0530) Subject: VMware: Implement backup/restore for VMDK driver X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=cf7e777105a91a35d92b62dba92fb1a7aabe675d;p=openstack-build%2Fcinder-build.git VMware: Implement backup/restore for VMDK driver Currently, backup-create and backup-restore operations are not supported for volumes created by the VMDK driver. This change implements backup_volume and restore_backup APIs for VMDK driver. DocImpact Added a new config option 'vmware_tmp_dir' which specifies the directory in Cinder host to use for storing temporary virtual disk files during backup/restore. Implements: blueprint vmdk-backup Change-Id: Ib19a699f71701c2034e77b8e6b50a8a3295a542b --- diff --git a/cinder/tests/test_vmware_vmdk.py b/cinder/tests/test_vmware_vmdk.py index 1a84e538c..3840cdf8c 100644 --- a/cinder/tests/test_vmware_vmdk.py +++ b/cinder/tests/test_vmware_vmdk.py @@ -143,6 +143,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase): TASK_POLL_INTERVAL = 5.0 IMG_TX_TIMEOUT = 10 MAX_OBJECTS = 100 + TMP_DIR = "/vmware-tmp" VMDK_DRIVER = vmdk.VMwareEsxVmdkDriver def setUp(self): @@ -158,7 +159,10 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase): self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS - self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config) + self._config.vmware_tmp_dir = self.TMP_DIR + self._db = mock.Mock() + self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config, + db=self._db) api_retry_count = self._config.vmware_api_retry_count, task_poll_interval = self._config.vmware_task_poll_interval, self._session = api.VMwareAPISession(self.IP, self.USERNAME, @@ -1421,6 +1425,292 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase): self._driver._extend_vmdk_virtual_disk, fake_name, fake_size) + 
@mock.patch.object(vmware_images, 'download_stream_optimized_disk') + @mock.patch('cinder.openstack.common.fileutils.file_open') + @mock.patch.object(VMDK_DRIVER, '_temporary_file') + @mock.patch('cinder.openstack.common.uuidutils.generate_uuid') + @mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + @mock.patch.object(VMDK_DRIVER, 'session') + def test_backup_volume(self, session, vops, create_backing, generate_uuid, + temporary_file, file_open, download_disk): + self._test_backup_volume(session, vops, create_backing, generate_uuid, + temporary_file, file_open, download_disk) + + def _test_backup_volume(self, session, vops, create_backing, generate_uuid, + temporary_file, file_open, download_disk): + volume = {'name': 'vol-1', 'id': 1, 'size': 1} + self._db.volume_get.return_value = volume + + vops.get_backing.return_value = None + backing = mock.sentinel.backing + create_backing.return_value = backing + + uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0" + generate_uuid.return_value = uuid + tmp_file_path = mock.sentinel.tmp_file_path + temporary_file_ret = mock.Mock() + temporary_file.return_value = temporary_file_ret + temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path) + temporary_file_ret.__exit__ = mock.Mock(return_value=None) + + vmdk_path = mock.sentinel.vmdk_path + vops.get_vmdk_path.return_value = vmdk_path + + tmp_file = mock.sentinel.tmp_file + file_open_ret = mock.Mock() + file_open.return_value = file_open_ret + file_open_ret.__enter__ = mock.Mock(return_value=tmp_file) + file_open_ret.__exit__ = mock.Mock(return_value=None) + + context = mock.sentinel.context + backup = {'id': 2, 'volume_id': 1} + backup_service = mock.Mock() + self._driver.backup_volume(context, backup, backup_service) + + create_backing.assert_called_once_with(volume) + temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid) + self.assertEqual(mock.call(tmp_file_path, "wb"), + 
file_open.call_args_list[0]) + download_disk.assert_called_once_with( + context, self.IMG_TX_TIMEOUT, tmp_file, session=session, + host=self.IP, vm=backing, vmdk_file_path=vmdk_path, + vmdk_size=volume['size'] * units.Gi) + self.assertEqual(mock.call(tmp_file_path, "rb"), + file_open.call_args_list[1]) + backup_service.backup.assert_called_once_with(backup, tmp_file) + + @mock.patch.object(VMDK_DRIVER, 'extend_volume') + @mock.patch.object(VMDK_DRIVER, '_restore_backing') + @mock.patch('cinder.openstack.common.fileutils.file_open') + @mock.patch.object(VMDK_DRIVER, '_temporary_file') + @mock.patch('cinder.openstack.common.uuidutils.generate_uuid') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + def test_restore_backup(self, vops, generate_uuid, temporary_file, + file_open, restore_backing, extend_volume): + self._test_restore_backup(vops, generate_uuid, temporary_file, + file_open, restore_backing, extend_volume) + + def _test_restore_backup( + self, vops, generate_uuid, temporary_file, file_open, + restore_backing, extend_volume): + volume = {'name': 'vol-1', 'id': 1, 'size': 1} + backup = {'id': 2, 'size': 1} + context = mock.sentinel.context + backup_service = mock.Mock() + + backing = mock.sentinel.backing + vops.get_backing.return_value = backing + vops.snapshot_exists.return_value = True + self.assertRaises( + exception.InvalidVolume, self._driver.restore_backup, context, + backup, volume, backup_service) + + uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0" + generate_uuid.return_value = uuid + tmp_file_path = mock.sentinel.tmp_file_path + temporary_file_ret = mock.Mock() + temporary_file.return_value = temporary_file_ret + temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path) + temporary_file_ret.__exit__ = mock.Mock(return_value=None) + + tmp_file = mock.sentinel.tmp_file + file_open_ret = mock.Mock() + file_open.return_value = file_open_ret + file_open_ret.__enter__ = mock.Mock(return_value=tmp_file) + file_open_ret.__exit__ = 
mock.Mock(return_value=None) + + vops.snapshot_exists.return_value = False + self._driver.restore_backup(context, backup, volume, backup_service) + + temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid) + file_open.assert_called_once_with(tmp_file_path, "wb") + backup_service.restore.assert_called_once_with( + backup, volume['id'], tmp_file) + restore_backing.assert_called_once_with( + context, volume, backing, tmp_file_path, backup['size'] * units.Gi) + self.assertFalse(extend_volume.called) + + temporary_file.reset_mock() + file_open.reset_mock() + backup_service.reset_mock() + restore_backing.reset_mock() + volume = {'name': 'vol-1', 'id': 1, 'size': 2} + self._driver.restore_backup(context, backup, volume, backup_service) + + temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid) + file_open.assert_called_once_with(tmp_file_path, "wb") + backup_service.restore.assert_called_once_with( + backup, volume['id'], tmp_file) + restore_backing.assert_called_once_with( + context, volume, backing, tmp_file_path, backup['size'] * units.Gi) + extend_volume.assert_called_once_with(volume, volume['size']) + + @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + @mock.patch( + 'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type') + @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') + @mock.patch.object(VMDK_DRIVER, + '_create_backing_from_stream_optimized_file') + @mock.patch('cinder.openstack.common.uuidutils.generate_uuid') + def test_restore_backing( + self, generate_uuid, create_backing, select_ds, get_disk_type, + vops, delete_temp_backing): + self._test_restore_backing( + generate_uuid, create_backing, select_ds, get_disk_type, vops, + delete_temp_backing) + + def _test_restore_backing( + self, generate_uuid, create_backing, select_ds, get_disk_type, + vops, delete_temp_backing): + src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0" + generate_uuid.return_value = 
src_uuid + + src = mock.sentinel.src + create_backing.return_value = src + + summary = mock.Mock() + summary.datastore = mock.sentinel.datastore + select_ds.return_value = (mock.ANY, mock.ANY, mock.ANY, summary) + + disk_type = vmdk.THIN_VMDK_TYPE + get_disk_type.return_value = disk_type + + context = mock.sentinel.context + volume = {'name': 'vol-1', 'id': 1, 'size': 1} + backing = None + tmp_file_path = mock.sentinel.tmp_file_path + backup_size = units.Gi + self._driver._restore_backing( + context, volume, backing, tmp_file_path, backup_size) + + create_backing.assert_called_once_with( + context, src_uuid, volume, tmp_file_path, backup_size) + vops.clone_backing.assert_called_once_with( + volume['name'], src, None, volumeops.FULL_CLONE_TYPE, + summary.datastore, disk_type) + delete_temp_backing.assert_called_once_with(src) + + create_backing.reset_mock() + vops.clone_backing.reset_mock() + delete_temp_backing.reset_mock() + + dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b" + tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa" + generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid] + + dest = mock.sentinel.dest + vops.clone_backing.return_value = dest + + backing = mock.sentinel.backing + self._driver._restore_backing( + context, volume, backing, tmp_file_path, backup_size) + + create_backing.assert_called_once_with( + context, src_uuid, volume, tmp_file_path, backup_size) + vops.clone_backing.assert_called_once_with( + dest_uuid, src, None, volumeops.FULL_CLONE_TYPE, + summary.datastore, disk_type) + exp_rename_calls = [mock.call(backing, tmp_uuid), + mock.call(dest, volume['name'])] + self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list) + exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)] + self.assertEqual(exp_delete_temp_backing_calls, + delete_temp_backing.call_args_list) + + delete_temp_backing.reset_mock() + vops.rename_backing.reset_mock() + + def vops_rename(backing, new_name): + if backing == dest and new_name 
== volume['name']: + raise error_util.VimException("error") + + vops.rename_backing.side_effect = vops_rename + generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid] + self.assertRaises( + error_util.VimException, self._driver._restore_backing, context, + volume, backing, tmp_file_path, backup_size) + exp_rename_calls = [mock.call(backing, tmp_uuid), + mock.call(dest, volume['name']), + mock.call(backing, volume['name'])] + self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list) + exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)] + self.assertEqual(exp_delete_temp_backing_calls, + delete_temp_backing.call_args_list) + + @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') + @mock.patch.object(vmware_images, 'upload_stream_optimized_disk') + @mock.patch('cinder.openstack.common.fileutils.file_open') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + @mock.patch( + 'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type') + @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') + @mock.patch.object(VMDK_DRIVER, 'session') + @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') + def test_create_backing_from_stream_optimized_file( + self, select_ds, session, get_storage_profile_id, get_disk_type, + vops, file_open, upload_disk, delete_temp_backing): + self._test_create_backing_from_stream_optimized_file( + select_ds, session, get_storage_profile_id, get_disk_type, vops, + file_open, upload_disk, delete_temp_backing) + + def _test_create_backing_from_stream_optimized_file( + self, select_ds, session, get_storage_profile_id, get_disk_type, + vops, file_open, upload_disk, delete_temp_backing): + rp = mock.sentinel.rp + folder = mock.sentinel.folder + summary = mock.Mock() + summary.name = mock.sentinel.name + select_ds.return_value = (mock.ANY, rp, folder, summary) + + import_spec = mock.Mock() + session.vim.client.factory.create.return_value = import_spec + + profile_id = 'profile-1' + 
get_storage_profile_id.return_value = profile_id + + disk_type = vmdk.THIN_VMDK_TYPE + get_disk_type.return_value = disk_type + + create_spec = mock.Mock() + vops.get_create_spec.return_value = create_spec + + tmp_file = mock.sentinel.tmp_file + file_open_ret = mock.Mock() + file_open.return_value = file_open_ret + file_open_ret.__enter__ = mock.Mock(return_value=tmp_file) + file_open_ret.__exit__ = mock.Mock(return_value=None) + + vm_ref = mock.sentinel.vm_ref + upload_disk.return_value = vm_ref + + context = mock.sentinel.context + name = 'vm-1' + volume = {'name': 'vol-1', 'id': 1, 'size': 1} + tmp_file_path = mock.sentinel.tmp_file_path + file_size_bytes = units.Gi + ret = self._driver._create_backing_from_stream_optimized_file( + context, name, volume, tmp_file_path, file_size_bytes) + + self.assertEqual(vm_ref, ret) + vops.get_create_spec.assert_called_once_with( + name, 0, disk_type, summary.name, profile_id) + file_open.assert_called_once_with(tmp_file_path, "rb") + upload_disk.assert_called_once_with( + context, self.IMG_TX_TIMEOUT, tmp_file, session=session, + host=self.IP, resource_pool=rp, vm_folder=folder, + vm_create_spec=import_spec, vmdk_size=file_size_bytes) + + upload_disk.side_effect = error_util.VimException("error") + backing = mock.sentinel.backing + vops.get_backing.return_value = backing + self.assertRaises( + error_util.VimException, + self._driver._create_backing_from_stream_optimized_file, + context, name, volume, tmp_file_path, file_size_bytes) + delete_temp_backing.assert_called_once_with(backing) + class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase): """Test class for VMwareVcVmdkDriver.""" @@ -1431,7 +1721,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase): def setUp(self): super(VMwareVcVmdkDriverTestCase, self).setUp() self._config.vmware_host_version = self.DEFAULT_VC_VERSION - self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config) + self._driver = 
vmdk.VMwareVcVmdkDriver(configuration=self._config, + db=self._db) def test_get_pbm_wsdl_location(self): # no version returns None @@ -2064,6 +2355,60 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase): self._test_extend_volume(volume_ops, _extend_virtual_disk, _select_ds_for_volume) + @mock.patch.object(vmware_images, 'download_stream_optimized_disk') + @mock.patch('cinder.openstack.common.fileutils.file_open') + @mock.patch.object(VMDK_DRIVER, '_temporary_file') + @mock.patch('cinder.openstack.common.uuidutils.generate_uuid') + @mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + @mock.patch.object(VMDK_DRIVER, 'session') + def test_backup_volume(self, session, vops, create_backing, generate_uuid, + temporary_file, file_open, download_disk): + self._test_backup_volume(session, vops, create_backing, generate_uuid, + temporary_file, file_open, download_disk) + + @mock.patch.object(VMDK_DRIVER, 'extend_volume') + @mock.patch.object(VMDK_DRIVER, '_restore_backing') + @mock.patch('cinder.openstack.common.fileutils.file_open') + @mock.patch.object(VMDK_DRIVER, '_temporary_file') + @mock.patch('cinder.openstack.common.uuidutils.generate_uuid') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + def test_restore_backup(self, vops, generate_uuid, temporary_file, + file_open, restore_backing, extend_volume): + self._test_restore_backup(vops, generate_uuid, temporary_file, + file_open, restore_backing, extend_volume) + + @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + @mock.patch( + 'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type') + @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') + @mock.patch.object(VMDK_DRIVER, + '_create_backing_from_stream_optimized_file') + @mock.patch('cinder.openstack.common.uuidutils.generate_uuid') + def test_restore_backing( + self, generate_uuid, create_backing, select_ds, get_disk_type, + 
vops, delete_temp_backing): + self._test_restore_backing( + generate_uuid, create_backing, select_ds, get_disk_type, vops, + delete_temp_backing) + + @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') + @mock.patch.object(vmware_images, 'upload_stream_optimized_disk') + @mock.patch('cinder.openstack.common.fileutils.file_open') + @mock.patch.object(VMDK_DRIVER, 'volumeops') + @mock.patch( + 'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type') + @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') + @mock.patch.object(VMDK_DRIVER, 'session') + @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') + def test_create_backing_from_stream_optimized_file( + self, select_ds, session, get_storage_profile_id, get_disk_type, + vops, file_open, upload_disk, delete_temp_backing): + self._test_create_backing_from_stream_optimized_file( + select_ds, session, get_storage_profile_id, get_disk_type, vops, + file_open, upload_disk, delete_temp_backing) + @mock.patch.object(VMDK_DRIVER, '_get_folder_ds_summary') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_backing_with_params(self, vops, get_folder_ds_summary): @@ -2113,6 +2458,27 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase): None, 'lsiLogic') + @mock.patch('cinder.openstack.common.fileutils.ensure_tree') + @mock.patch('cinder.openstack.common.fileutils.delete_if_exists') + @mock.patch('tempfile.mkstemp') + @mock.patch('os.close') + def test_temporary_file( + self, close, mkstemp, delete_if_exists, ensure_tree): + fd = mock.sentinel.fd + tmp = mock.sentinel.tmp + mkstemp.return_value = (fd, tmp) + prefix = ".vmdk" + suffix = "test" + with self._driver._temporary_file(prefix=prefix, + suffix=suffix) as tmp_file: + self.assertEqual(tmp, tmp_file) + ensure_tree.assert_called_once_with(self.TMP_DIR) + mkstemp.assert_called_once_with(dir=self.TMP_DIR, + prefix=prefix, + suffix=suffix) + close.assert_called_once_with(fd) + delete_if_exists.assert_called_once_with(tmp) + 
class ImageDiskTypeTest(test.TestCase): """Unit tests for ImageDiskType.""" diff --git a/cinder/volume/drivers/vmware/io_util.py b/cinder/volume/drivers/vmware/io_util.py index a0471954b..22108acb9 100644 --- a/cinder/volume/drivers/vmware/io_util.py +++ b/cinder/volume/drivers/vmware/io_util.py @@ -27,6 +27,7 @@ from eventlet import queue from cinder.i18n import _ from cinder.openstack.common import log as logging from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import read_write_util LOG = logging.getLogger(__name__) IO_THREAD_SLEEP_TIME = .01 @@ -181,7 +182,7 @@ class IOThread(object): self._running = True while self._running: try: - data = self.input_file.read(None) + data = self.input_file.read(read_write_util.READ_CHUNKSIZE) if not data: self.stop() self.done.send(True) diff --git a/cinder/volume/drivers/vmware/read_write_util.py b/cinder/volume/drivers/vmware/read_write_util.py index f7dc5c870..a43489b67 100644 --- a/cinder/volume/drivers/vmware/read_write_util.py +++ b/cinder/volume/drivers/vmware/read_write_util.py @@ -199,6 +199,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile): self._lease = lease lease_info = session.invoke_api(vim_util, 'get_object_property', session.vim, lease, 'info') + self._vm_ref = lease_info.entity # Find the url for vmdk device url = self.find_vmdk_url(lease_info, host) if not url: @@ -262,6 +263,10 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile): LOG.debug("Lease is already in state: %s." 
% state) + super(VMwareHTTPWriteVmdk, self).close() + + def get_imported_vm(self): + """Get managed object reference of the VM created for import.""" + return self._vm_ref + + class VMwareHTTPReadVmdk(VMwareHTTPFile): """read VMDK over HTTP using VMware HttpNfcLease.""" diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py index b3d02cdfb..b92906e17 100644 --- a/cinder/volume/drivers/vmware/vmdk.py +++ b/cinder/volume/drivers/vmware/vmdk.py @@ -22,14 +22,17 @@ driver creates a virtual machine for each of the volumes. This virtual machine is never powered on and is often referred as the shadow VM. """ +import contextlib import distutils.version as dist_version # pylint: disable=E0611 import os +import tempfile from oslo.config import cfg from cinder import exception from cinder.i18n import _ from cinder.openstack.common import excutils +from cinder.openstack.common import fileutils from cinder.openstack.common import log as logging from cinder.openstack.common import units from cinder.openstack.common import uuidutils @@ -95,6 +98,10 @@ vmdk_opts = [ 'The driver attempts to retrieve the version from VMware ' 'VC server. 
Set this configuration only if you want to ' 'override the VC server version.'), + cfg.StrOpt('vmware_tmp_dir', + default='/tmp', + help='Directory where virtual disks are stored during volume ' + 'backup and restore.') ] CONF = cfg.CONF @@ -181,7 +188,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver): # 1.0 - initial version of driver # 1.1.0 - selection of datastore based on number of host mounts # 1.2.0 - storage profile volume types based placement of volumes - VERSION = '1.2.0' + # 1.3.0 - support for volume backup/restore + VERSION = '1.3.0' def _do_deprecation_warning(self): LOG.warn(_('The VMware ESX VMDK driver is now deprecated and will be ' @@ -1382,6 +1390,227 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver): LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") % {'vol': vol_name, 'size': new_size}) + @contextlib.contextmanager + def _temporary_file(self, *args, **kwargs): + """Create a temporary file and return its path.""" + tmp_dir = self.configuration.vmware_tmp_dir + fileutils.ensure_tree(tmp_dir) + fd, tmp = tempfile.mkstemp( + dir=self.configuration.vmware_tmp_dir, *args, **kwargs) + try: + os.close(fd) + yield tmp + finally: + fileutils.delete_if_exists(tmp) + + def _download_vmdk(self, context, volume, backing, tmp_file_path): + """Download virtual disk in streamOptimized format.""" + timeout = self.configuration.vmware_image_transfer_timeout_secs + host_ip = self.configuration.vmware_host_ip + vmdk_ds_file_path = self.volumeops.get_vmdk_path(backing) + + with fileutils.file_open(tmp_file_path, "wb") as tmp_file: + vmware_images.download_stream_optimized_disk( + context, timeout, tmp_file, session=self.session, + host=host_ip, vm=backing, vmdk_file_path=vmdk_ds_file_path, + vmdk_size=volume['size'] * units.Gi) + + def backup_volume(self, context, backup, backup_service): + """Create a new backup from an existing volume.""" + volume = self.db.volume_get(context, backup['volume_id']) + + LOG.debug("Creating backup: %(backup_id)s 
for volume: %(name)s.", + {'backup_id': backup['id'], + 'name': volume['name']}) + + backing = self.volumeops.get_backing(volume['name']) + if backing is None: + LOG.debug("Creating backing for volume: %s.", volume['name']) + backing = self._create_backing_in_inventory(volume) + + tmp_vmdk_name = uuidutils.generate_uuid() + with self._temporary_file(suffix=".vmdk", + prefix=tmp_vmdk_name) as tmp_file_path: + # TODO(vbala) Clean up vmware_tmp_dir during driver init. + LOG.debug("Using temporary file: %(tmp_path)s for creating backup:" + " %(backup_id)s.", + {'tmp_path': tmp_file_path, + 'backup_id': backup['id']}) + self._download_vmdk(context, volume, backing, tmp_file_path) + with fileutils.file_open(tmp_file_path, "rb") as tmp_file: + LOG.debug("Calling backup service to backup file: %s.", + tmp_file_path) + backup_service.backup(backup, tmp_file) + LOG.debug("Created backup: %(backup_id)s for volume: " + "%(name)s.", + {'backup_id': backup['id'], + 'name': volume['name']}) + + def _create_backing_from_stream_optimized_file( + self, context, name, volume, tmp_file_path, file_size_bytes): + """Create backing from streamOptimized virtual disk file.""" + LOG.debug("Creating backing: %(name)s from virtual disk: %(path)s.", + {'name': name, + 'path': tmp_file_path}) + + (host, rp, folder, summary) = self._select_ds_for_volume(volume) + LOG.debug("Selected datastore: %(ds)s for backing: %(name)s.", + {'ds': summary.name, + 'name': name}) + + # Prepare import spec for backing. 
+ cf = self.session.vim.client.factory + vm_import_spec = cf.create('ns0:VirtualMachineImportSpec') + + profile_id = self._get_storage_profile_id(volume) + disk_type = VMwareEsxVmdkDriver._get_disk_type(volume) + vm_create_spec = self.volumeops.get_create_spec(name, + 0, + disk_type, + summary.name, + profile_id) + vm_import_spec.configSpec = vm_create_spec + + timeout = self.configuration.vmware_image_transfer_timeout_secs + host_ip = self.configuration.vmware_host_ip + try: + with fileutils.file_open(tmp_file_path, "rb") as tmp_file: + vm_ref = vmware_images.upload_stream_optimized_disk( + context, timeout, tmp_file, session=self.session, + host=host_ip, resource_pool=rp, vm_folder=folder, + vm_create_spec=vm_import_spec, vmdk_size=file_size_bytes) + LOG.debug("Created backing: %(name)s from virtual disk: " + "%(path)s.", + {'name': name, + 'path': tmp_file_path}) + return vm_ref + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Error occurred while creating temporary " + "backing.")) + backing = self.volumeops.get_backing(name) + if backing is not None: + self._delete_temp_backing(backing) + + def _restore_backing( + self, context, volume, backing, tmp_file_path, backup_size): + """Restore backing from backup.""" + # Create temporary backing from streamOptimized file. + src_name = uuidutils.generate_uuid() + src = self._create_backing_from_stream_optimized_file( + context, src_name, volume, tmp_file_path, backup_size) + + # Copy temporary backing for desired disk type conversion. + new_backing = (backing is None) + if new_backing: + # No backing exists; clone can be used as the volume backing. + dest_name = volume['name'] + else: + # Backing exists; clone can be used as the volume backing only + # after deleting the current backing. + dest_name = uuidutils.generate_uuid() + + dest = None + tmp_backing_name = None + renamed = False + try: + # Find datastore for clone. 
+ (host, rp, folder, summary) = self._select_ds_for_volume(volume) + datastore = summary.datastore + + disk_type = VMwareEsxVmdkDriver._get_disk_type(volume) + dest = self.volumeops.clone_backing(dest_name, src, None, + volumeops.FULL_CLONE_TYPE, + datastore, disk_type) + if new_backing: + LOG.debug("Created new backing: %s for restoring backup.", + dest_name) + return + + # Rename current backing. + tmp_backing_name = uuidutils.generate_uuid() + self.volumeops.rename_backing(backing, tmp_backing_name) + renamed = True + + # Rename clone in order to treat it as the volume backing. + self.volumeops.rename_backing(dest, volume['name']) + + # Now we can delete the old backing. + self._delete_temp_backing(backing) + + LOG.debug("Deleted old backing and renamed clone for restoring " + "backup.") + except (error_util.VimException, error_util.VMwareDriverException): + with excutils.save_and_reraise_exception(): + if dest is not None: + # Copy happened; we need to delete the clone. + self._delete_temp_backing(dest) + if renamed: + # Old backing was renamed; we need to undo that. + try: + self.volumeops.rename_backing(backing, + volume['name']) + except error_util.VimException: + LOG.warn(_("Cannot undo volume rename; old name " + "was %(old_name)s and new name is " + "%(new_name)s."), + {'old_name': volume['name'], + 'new_name': tmp_backing_name}, + exc_info=True) + finally: + # Delete the temporary backing. + self._delete_temp_backing(src) + + def restore_backup(self, context, backup, volume, backup_service): + """Restore an existing backup to a new or existing volume. + + This method raises InvalidVolume if the existing volume contains + snapshots since it is not possible to restore the virtual disk of + a backing with snapshots. 
+ """ + LOG.debug("Restoring backup: %(backup_id)s to volume: %(name)s.", + {'backup_id': backup['id'], + 'name': volume['name']}) + + backing = self.volumeops.get_backing(volume['name']) + if backing is not None and self.volumeops.snapshot_exists(backing): + msg = _("Volume cannot be restored since it contains snapshots.") + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + tmp_vmdk_name = uuidutils.generate_uuid() + with self._temporary_file(suffix=".vmdk", + prefix=tmp_vmdk_name) as tmp_file_path: + LOG.debug("Using temporary file: %(tmp_path)s for restoring " + "backup: %(backup_id)s.", + {'tmp_path': tmp_file_path, + 'backup_id': backup['id']}) + with fileutils.file_open(tmp_file_path, "wb") as tmp_file: + LOG.debug("Calling backup service to restore backup: " + "%(backup_id)s to file: %(tmp_path)s.", + {'backup_id': backup['id'], + 'tmp_path': tmp_file_path}) + backup_service.restore(backup, volume['id'], tmp_file) + LOG.debug("Backup: %(backup_id)s restored to file: " + "%(tmp_path)s.", + {'backup_id': backup['id'], + 'tmp_path': tmp_file_path}) + self._restore_backing(context, volume, backing, tmp_file_path, + backup['size'] * units.Gi) + + if backup['size'] < volume['size']: + # Current backing size is backup size. 
+ LOG.debug("Backup size: %(backup_size)d is less than " + "volume size: %(vol_size)d; extending volume.", + {'backup_size': backup['size'], + 'vol_size': volume['size']}) + self.extend_volume(volume, volume['size']) + + LOG.debug("Backup: %(backup_id)s restored to volume: " + "%(name)s.", + {'backup_id': backup['id'], + 'name': volume['name']}) + class VMwareVcVmdkDriver(VMwareEsxVmdkDriver): """Manage volumes on VMware VC server.""" diff --git a/cinder/volume/drivers/vmware/vmware_images.py b/cinder/volume/drivers/vmware/vmware_images.py index 211cf0921..0c604501d 100644 --- a/cinder/volume/drivers/vmware/vmware_images.py +++ b/cinder/volume/drivers/vmware/vmware_images.py @@ -159,3 +159,39 @@ def upload_image(context, timeout_secs, image_service, image_id, owner_id, image_service=image_service, image_id=image_id, image_meta=image_metadata) LOG.info(_("Uploaded image: %s to the Glance image server.") % image_id) + + +def download_stream_optimized_disk( + context, timeout_secs, write_handle, **kwargs): + """Download virtual disk in streamOptimized format from VMware server.""" + vmdk_file_path = kwargs.get('vmdk_file_path') + LOG.debug("Downloading virtual disk: %(vmdk_path)s to %(dest)s.", + {'vmdk_path': vmdk_file_path, + 'dest': write_handle.name}) + file_size = kwargs.get('vmdk_size') + read_handle = rw_util.VMwareHTTPReadVmdk(kwargs.get('session'), + kwargs.get('host'), + kwargs.get('vm'), + vmdk_file_path, + file_size) + start_transfer(context, timeout_secs, read_handle, file_size, write_handle) + LOG.debug("Downloaded virtual disk: %s.", vmdk_file_path) + + +def upload_stream_optimized_disk(context, timeout_secs, read_handle, **kwargs): + """Upload virtual disk in streamOptimized format to VMware server.""" + LOG.debug("Uploading virtual disk file: %(path)s to create backing with " + "spec: %(spec)s.", + {'path': read_handle.name, + 'spec': kwargs.get('vm_create_spec')}) + file_size = kwargs.get('vmdk_size') + write_handle = 
rw_util.VMwareHTTPWriteVmdk(kwargs.get('session'), + kwargs.get('host'), + kwargs.get('resource_pool'), + kwargs.get('vm_folder'), + kwargs.get('vm_create_spec'), + file_size) + start_transfer(context, timeout_secs, read_handle, file_size, + write_file_handle=write_handle) + LOG.debug("Uploaded virtual disk file: %s.", read_handle.name) + return write_handle.get_imported_vm() diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample index d7e0838d7..87cdd4051 100644 --- a/etc/cinder/cinder.conf.sample +++ b/etc/cinder/cinder.conf.sample @@ -2056,6 +2056,10 @@ # the VC server version. (string value) #vmware_host_version= +# Directory where virtual disks are stored during volume +# backup and restore. (string value) +#vmware_tmp_dir=/tmp + # # Options defined in cinder.volume.drivers.windows.windows