VMDK copy_image_to_volume and copy_volume_to_image
author    Subramanian Neelakantan <subramanian.neelakantan@gmail.com>
          Fri, 23 Aug 2013 12:19:17 +0000 (17:49 +0530)
committer John Griffith <john.griffith@solidfire.com>
          Wed, 4 Sep 2013 23:56:27 +0000 (17:56 -0600)
Implemented copy_image_to_volume, which creates a new volume backing (VM)
and copies the vmdk file over from the glance image. Only glance images of
disk format 'vmdk' can be used to create a volume with this driver.

Also implemented copy_volume_to_image, which creates a new glance image from
the volume's vmdk file. The steps involved are: take a snapshot of the volume
vmdk, make a copy of this read-only file, and upload the copy to glance.

Cleaned up docstrings as pointed out in earlier reviews.

Added a timeout to avoid blocking indefinitely on the read/write threads, and
fixed a bug so that the image is uploaded from the copy of the vmdk rather
than from the original vmdk during upload to glance.

Implements: blueprint vmware-vmdk-cinder-driver
Change-Id: I00d22861f4e01ae0862dbf4b60af314c475b7d38

cinder/image/glance.py
cinder/tests/test_vmware_vmdk.py
cinder/volume/drivers/vmware/io_util.py [new file with mode: 0644]
cinder/volume/drivers/vmware/read_write_util.py [new file with mode: 0644]
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/vmware/vmware_images.py [new file with mode: 0644]
cinder/volume/drivers/vmware/volumeops.py
etc/cinder/cinder.conf.sample

index 3359283d03b5715bd5c3f96c5a6db7ac5581df88..5ff8c4da52a56c7bead86dd980adccd1bbeb49e9 100644 (file)
@@ -24,11 +24,11 @@ from __future__ import absolute_import
 import copy
 import itertools
 import random
+import shutil
 import sys
 import time
 import urlparse
 
-import glanceclient
 import glanceclient.exc
 from oslo.config import cfg
 
@@ -37,8 +37,15 @@ from cinder.openstack.common import jsonutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 
-
+glance_opts = [
+    cfg.ListOpt('allowed_direct_url_schemes',
+                default=[],
+                help='A list of url schemes that can be downloaded directly '
+                     'via the direct_url.  Currently supported schemes: '
+                     '[file].'),
+]
 CONF = cfg.CONF
+CONF.register_opts(glance_opts)
 
 LOG = logging.getLogger(__name__)
 
@@ -240,15 +247,29 @@ class GlanceImageService(object):
 
         return getattr(image_meta, 'direct_url', None)
 
-    def download(self, context, image_id, data):
-        """Calls out to Glance for metadata and data and writes data."""
+    def download(self, context, image_id, data=None):
+        """Calls out to Glance for data and writes data."""
+        if 'file' in CONF.allowed_direct_url_schemes:
+            location = self.get_location(context, image_id)
+            o = urlparse.urlparse(location)
+            if o.scheme == "file":
+                with open(o.path, "r") as f:
+                    # a system call to cp could have significant performance
+                    # advantages, however we do not have the path to files at
+                    # this point in the abstraction.
+                    shutil.copyfileobj(f, data)
+                return
+
         try:
             image_chunks = self._client.call(context, 'data', image_id)
         except Exception:
             _reraise_translated_image_exception(image_id)
 
-        for chunk in image_chunks:
-            data.write(chunk)
+        if not data:
+            return image_chunks
+        else:
+            for chunk in image_chunks:
+                data.write(chunk)
 
     def create(self, context, image_meta, data=None):
         """Store the image data and return the new image object."""
index 0b8dd1c3d1710012c30cbef4622d809e8d5743fa..6fdff3ffce7878873a268a297f333b8241b300f0 100644 (file)
@@ -22,6 +22,7 @@ Test suite for VMware VMDK driver.
 import mox
 
 from cinder import exception
+from cinder.image import glance
 from cinder import test
 from cinder import units
 from cinder.volume import configuration
@@ -29,6 +30,7 @@ from cinder.volume.drivers.vmware import api
 from cinder.volume.drivers.vmware import error_util
 from cinder.volume.drivers.vmware import vim_util
 from cinder.volume.drivers.vmware import vmdk
+from cinder.volume.drivers.vmware import vmware_images
 from cinder.volume.drivers.vmware import volumeops
 
 
@@ -37,6 +39,10 @@ class FakeVim(object):
     def service_content(self):
         return mox.MockAnything()
 
+    @property
+    def client(self):
+        return mox.MockAnything()
+
     def Login(self, session_manager, userName, password):
         return mox.MockAnything()
 
@@ -100,6 +106,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
     VOLUME_FOLDER = 'cinder-volumes'
     API_RETRY_COUNT = 3
     TASK_POLL_INTERVAL = 5.0
+    IMG_TX_TIMEOUT = 10
 
     def setUp(self):
         super(VMwareEsxVmdkDriverTestCase, self).setUp()
@@ -112,6 +119,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         self._config.vmware_volume_folder = self.VOLUME_FOLDER
         self._config.vmware_api_retry_count = self.API_RETRY_COUNT
         self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
+        self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
         self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config)
         api_retry_count = self._config.vmware_api_retry_count,
         task_poll_interval = self._config.vmware_task_poll_interval,
@@ -832,7 +840,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         m.VerifyAll()
 
     def test_delete_file(self):
-        """Test _delete_file."""
+        """Test delete_file."""
         m = mox.Mox()
         m.StubOutWithMock(api.VMwareAPISession, 'vim')
         self._session.vim = self._vim
@@ -846,7 +854,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         self._session.wait_for_task(task)
 
         m.ReplayAll()
-        self._volumeops._delete_file(src_path)
+        self._volumeops.delete_file(src_path)
         m.UnsetStubs()
         m.VerifyAll()
 
@@ -902,7 +910,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         volume['name'] = 'volume_name'
         volume['size'] = 1
         m.StubOutWithMock(self._volumeops, 'get_path_name')
-        src_path = '/vmfs/volumes/datastore/vm/'
+        src_path = '[datastore1] vm/'
         vmx_name = 'vm.vmx'
         backing = FakeMor('VirtualMachine', 'my_back')
         self._volumeops.get_path_name(backing).AndReturn(src_path + vmx_name)
@@ -921,10 +929,10 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
                                             datastores).AndReturn((folder,
                                                                    summary))
         m.StubOutWithMock(self._volumeops, 'copy_backing')
-        dest_path = '[%s] %s' % (summary.name, volume['name'])
+        dest_path = '[%s] %s/' % (summary.name, volume['name'])
         self._volumeops.copy_backing(src_path, dest_path)
         m.StubOutWithMock(self._volumeops, 'register_backing')
-        self._volumeops.register_backing(dest_path + '/' + vmx_name,
+        self._volumeops.register_backing(dest_path + vmx_name,
                                          volume['name'], folder, resource_pool)
 
         m.ReplayAll()
@@ -1039,6 +1047,296 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
         m.UnsetStubs()
         m.VerifyAll()
 
+    def test_get_entity_name(self):
+        """Test volumeops get_entity_name."""
+        m = mox.Mox()
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        entity = FakeMor('VirtualMachine', 'virt')
+        self._session.invoke_api(vim_util, 'get_object_property',
+                                 self._vim, entity, 'name')
+
+        m.ReplayAll()
+        self._volumeops.get_entity_name(entity)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_get_vmdk_path(self):
+        """Test volumeops get_vmdk_path."""
+        m = mox.Mox()
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        backing = FakeMor('VirtualMachine', 'my_back')
+        vmdk_path = '[datastore 1] folders/myvols/volume-123.vmdk'
+
+        class VirtualDisk:
+            pass
+        virtualDisk = VirtualDisk()
+
+        class VirtualDiskFlatVer2BackingInfo:
+            pass
+        backingInfo = VirtualDiskFlatVer2BackingInfo()
+        backingInfo.fileName = vmdk_path
+        virtualDisk.backing = backingInfo
+        devices = [FakeObject(), virtualDisk, FakeObject()]
+
+        moxed = self._session.invoke_api(vim_util, 'get_object_property',
+                                         self._vim, backing,
+                                         'config.hardware.device')
+        moxed.AndReturn(devices)
+
+        m.ReplayAll()
+        actual_vmdk_path = self._volumeops.get_vmdk_path(backing)
+        self.assertEquals(backingInfo.__class__.__name__,
+                          'VirtualDiskFlatVer2BackingInfo')
+        self.assertEquals(virtualDisk.__class__.__name__, 'VirtualDisk')
+        self.assertEquals(actual_vmdk_path, vmdk_path)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_copy_vmdk_file(self):
+        """Test copy_vmdk_file."""
+        m = mox.Mox()
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        dc_ref = FakeMor('Datacenter', 'dc1')
+        src_path = 'src_path'
+        dest_path = 'dest_path'
+        task = FakeMor('Task', 'my_task')
+        self._session.invoke_api(self._vim, 'CopyVirtualDisk_Task',
+                                 mox.IgnoreArg(), sourceName=src_path,
+                                 sourceDatacenter=dc_ref, destName=dest_path,
+                                 destDatacenter=dc_ref,
+                                 force=True).AndReturn(task)
+        m.StubOutWithMock(self._session, 'wait_for_task')
+        self._session.wait_for_task(task)
+
+        m.ReplayAll()
+        self._volumeops.copy_vmdk_file(dc_ref, src_path, dest_path)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_delete_vmdk_file(self):
+        """Test delete_vmdk_file."""
+        m = mox.Mox()
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        dc_ref = FakeMor('Datacenter', 'dc1')
+        vmdk_path = 'vmdk_path'
+        task = FakeMor('Task', 'my_task')
+        self._session.invoke_api(self._vim, 'DeleteVirtualDisk_Task',
+                                 mox.IgnoreArg(), name=vmdk_path,
+                                 datacenter=dc_ref).AndReturn(task)
+        m.StubOutWithMock(self._session, 'wait_for_task')
+        self._session.wait_for_task(task)
+
+        m.ReplayAll()
+        self._volumeops.delete_vmdk_file(vmdk_path, dc_ref)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_split_datastore_path(self):
+        """Test volumeops split_datastore_path."""
+        test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx'
+        (datastore,
+         folder,
+         file_name) = volumeops.split_datastore_path(test1)
+        self.assertEquals(datastore, 'datastore1')
+        self.assertEquals(folder, 'myfolder/mysubfolder/')
+        self.assertEquals(file_name, 'myvm.vmx')
+        test2 = '[datastore2 ]   myfolder/myvm.vmdk'
+        (datastore,
+         folder,
+         file_name) = volumeops.split_datastore_path(test2)
+        self.assertEquals(datastore, 'datastore2')
+        self.assertEquals(folder, 'myfolder/')
+        self.assertEquals(file_name, 'myvm.vmdk')
+        test3 = 'myfolder/myvm.vmdk'
+        self.assertRaises(IndexError, volumeops.split_datastore_path, test3)
+
+    def test_copy_image_to_volume_non_vmdk(self):
+        """Test copy_image_to_volume for a non-vmdk disk format."""
+        m = mox.Mox()
+        image_id = 'image-123456789'
+        image_meta = FakeObject()
+        image_meta['disk_format'] = 'novmdk'
+        image_service = m.CreateMock(glance.GlanceImageService)
+        image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta)
+
+        m.ReplayAll()
+        self.assertRaises(exception.ImageUnacceptable,
+                          self._driver.copy_image_to_volume,
+                          mox.IgnoreArg(), mox.IgnoreArg(),
+                          image_service, image_id)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_copy_image_to_volume_vmdk(self):
+        """Test copy_image_to_volume with an acceptable vmdk disk format."""
+        m = mox.Mox()
+        m.StubOutWithMock(self._driver.__class__, 'session')
+        self._driver.session = self._session
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+
+        image_id = 'image-id'
+        image_meta = FakeObject()
+        image_meta['disk_format'] = 'vmdk'
+        image_meta['size'] = 1024 * 1024
+        image_service = m.CreateMock(glance.GlanceImageService)
+        image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta)
+        volume = FakeObject()
+        vol_name = 'volume name'
+        volume['name'] = vol_name
+        backing = FakeMor('VirtualMachine', 'my_vm')
+        m.StubOutWithMock(self._driver, '_create_backing_in_inventory')
+        self._driver._create_backing_in_inventory(volume).AndReturn(backing)
+        datastore_name = 'datastore1'
+        flat_vmdk_path = 'myvolumes/myvm-flat.vmdk'
+        m.StubOutWithMock(self._driver, '_get_ds_name_flat_vmdk_path')
+        moxed = self._driver._get_ds_name_flat_vmdk_path(mox.IgnoreArg(),
+                                                         vol_name)
+        moxed.AndReturn((datastore_name, flat_vmdk_path))
+        host = FakeMor('Host', 'my_host')
+        m.StubOutWithMock(self._volumeops, 'get_host')
+        self._volumeops.get_host(backing).AndReturn(host)
+        datacenter = FakeMor('Datacenter', 'my_datacenter')
+        m.StubOutWithMock(self._volumeops, 'get_dc')
+        self._volumeops.get_dc(host).AndReturn(datacenter)
+        datacenter_name = 'my-datacenter'
+        m.StubOutWithMock(self._volumeops, 'get_entity_name')
+        self._volumeops.get_entity_name(datacenter).AndReturn(datacenter_name)
+        flat_path = '[%s] %s' % (datastore_name, flat_vmdk_path)
+        m.StubOutWithMock(self._volumeops, 'delete_file')
+        self._volumeops.delete_file(flat_path, datacenter)
+        client = FakeObject()
+        client.options = FakeObject()
+        client.options.transport = FakeObject()
+        cookies = FakeObject()
+        client.options.transport.cookiejar = cookies
+        m.StubOutWithMock(self._vim.__class__, 'client')
+        self._vim.client = client
+        m.StubOutWithMock(vmware_images, 'fetch_image')
+        timeout = self._config.vmware_image_transfer_timeout_secs
+        vmware_images.fetch_image(mox.IgnoreArg(), timeout, image_service,
+                                  image_id, host=self.IP,
+                                  data_center_name=datacenter_name,
+                                  datastore_name=datastore_name,
+                                  cookies=cookies,
+                                  file_path=flat_vmdk_path)
+
+        m.ReplayAll()
+        self._driver.copy_image_to_volume(mox.IgnoreArg(), volume,
+                                          image_service, image_id)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_copy_volume_to_image_non_vmdk(self):
+        """Test copy_volume_to_image for a non-vmdk disk format."""
+        m = mox.Mox()
+        image_meta = FakeObject()
+        image_meta['disk_format'] = 'novmdk'
+        volume = FakeObject()
+        volume['name'] = 'vol-name'
+
+        m.ReplayAll()
+        self.assertRaises(exception.ImageUnacceptable,
+                          self._driver.copy_volume_to_image,
+                          mox.IgnoreArg(), volume,
+                          mox.IgnoreArg(), image_meta)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_copy_volume_to_image_vmdk(self):
+        """Test copy_volume_to_image for a valid vmdk disk format."""
+        m = mox.Mox()
+        m.StubOutWithMock(self._driver.__class__, 'session')
+        self._driver.session = self._session
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+
+        image_id = 'image-id-1'
+        image_meta = FakeObject()
+        image_meta['disk_format'] = 'vmdk'
+        image_meta['id'] = image_id
+        image_meta['name'] = image_id
+        image_service = FakeObject()
+        vol_name = 'volume-123456789'
+        project_id = 'project-owner-id-123'
+        volume = FakeObject()
+        volume['name'] = vol_name
+        volume['project_id'] = project_id
+        # volumeops.get_backing
+        backing = FakeMor("VirtualMachine", "my_vm")
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        self._volumeops.get_backing(vol_name).AndReturn(backing)
+        # volumeops.get_vmdk_path
+        datastore_name = 'datastore1'
+        file_path = 'my_folder/my_nested_folder/my_vm.vmdk'
+        vmdk_file_path = '[%s] %s' % (datastore_name, file_path)
+        m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
+        self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path)
+        # volumeops.create_snapshot
+        snapshot_name = 'snapshot-%s' % image_id
+        m.StubOutWithMock(self._volumeops, 'create_snapshot')
+        self._volumeops.create_snapshot(backing, snapshot_name, None, True)
+        tmp_vmdk = '[datastore1] %s.vmdk' % image_id
+        # volumeops.get_host
+        host = FakeMor('Host', 'my_host')
+        m.StubOutWithMock(self._volumeops, 'get_host')
+        self._volumeops.get_host(backing).AndReturn(host)
+        # volumeops.get_dc
+        datacenter_name = 'my_datacenter'
+        datacenter = FakeMor('Datacenter', datacenter_name)
+        m.StubOutWithMock(self._volumeops, 'get_dc')
+        self._volumeops.get_dc(host).AndReturn(datacenter)
+        # volumeops.copy_vmdk_file
+        m.StubOutWithMock(self._volumeops, 'copy_vmdk_file')
+        self._volumeops.copy_vmdk_file(datacenter, vmdk_file_path, tmp_vmdk)
+        # host_ip
+        host_ip = self.IP
+        # volumeops.get_entity_name
+        m.StubOutWithMock(self._volumeops, 'get_entity_name')
+        self._volumeops.get_entity_name(datacenter).AndReturn(datacenter_name)
+        # cookiejar
+        client = FakeObject()
+        client.options = FakeObject()
+        client.options.transport = FakeObject()
+        cookies = FakeObject()
+        client.options.transport.cookiejar = cookies
+        m.StubOutWithMock(self._vim.__class__, 'client')
+        self._vim.client = client
+        # flat_vmdk
+        flat_vmdk_file = '%s-flat.vmdk' % image_id
+        # vmware_images.upload_image
+        timeout = self._config.vmware_image_transfer_timeout_secs
+        m.StubOutWithMock(vmware_images, 'upload_image')
+        vmware_images.upload_image(mox.IgnoreArg(), timeout, image_service,
+                                   image_id, project_id, host=host_ip,
+                                   data_center_name=datacenter_name,
+                                   datastore_name=datastore_name,
+                                   cookies=cookies,
+                                   file_path=flat_vmdk_file,
+                                   snapshot_name=image_meta['name'],
+                                   image_version=1)
+        # volumeops.delete_vmdk_file
+        m.StubOutWithMock(self._volumeops, 'delete_vmdk_file')
+        self._volumeops.delete_vmdk_file(tmp_vmdk, datacenter)
+
+        m.ReplayAll()
+        self._driver.copy_volume_to_image(mox.IgnoreArg(), volume,
+                                          image_service, image_meta)
+        m.UnsetStubs()
+        m.VerifyAll()
+
 
 class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
     """Test class for VMwareVcVmdkDriver."""
diff --git a/cinder/volume/drivers/vmware/io_util.py b/cinder/volume/drivers/vmware/io_util.py
new file mode 100644 (file)
index 0000000..435e98d
--- /dev/null
@@ -0,0 +1,185 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Utility classes for the time-saving transfer of data from a reader to a
+writer, using a LightQueue as a pipe between them.
+"""
+
+from eventlet import event
+from eventlet import greenthread
+from eventlet import queue
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+IO_THREAD_SLEEP_TIME = .01
+GLANCE_POLL_INTERVAL = 5
+
+
+class ThreadSafePipe(queue.LightQueue):
+    """The pipe to hold the data which the reader writes to and the writer
+    reads from.
+    """
+    def __init__(self, maxsize, transfer_size):
+        queue.LightQueue.__init__(self, maxsize)
+        self.transfer_size = transfer_size
+        self.transferred = 0
+
+    def read(self, chunk_size):
+        """Read data from the pipe.
+
+        Chunk size is ignored, since we have ensured that the data chunks
+        written to the pipe by the reader are the same size as the chunks
+        asked for by the writer.
+        """
+        if self.transferred < self.transfer_size:
+            data_item = self.get()
+            self.transferred += len(data_item)
+            return data_item
+        else:
+            return ""
+
+    def write(self, data):
+        """Put a data item in the pipe."""
+        self.put(data)
+
+    def seek(self, offset, whence=0):
+        """Set the file's current position at the offset."""
+        pass
+
+    def tell(self):
+        """Get size of the file to be read."""
+        return self.transfer_size
+
+    def close(self):
+        """A place-holder to maintain consistency."""
+        pass
+
+
+class GlanceWriteThread(object):
+    """Ensures that image data is written to in the glance client and that
+    it is in correct ('active')state.
+    """
+
+    def __init__(self, context, input, image_service, image_id,
+                 image_meta=None):
+        if not image_meta:
+            image_meta = {}
+
+        self.context = context
+        self.input = input
+        self.image_service = image_service
+        self.image_id = image_id
+        self.image_meta = image_meta
+        self._running = False
+
+    def start(self):
+        self.done = event.Event()
+
+        def _inner():
+            """Initiate write thread.
+
+            Do the image data transfer through an update call, then poll
+            until the image state becomes 'active'.
+            """
+            self.image_service.update(self.context,
+                                      self.image_id,
+                                      self.image_meta,
+                                      data=self.input)
+            self._running = True
+            while self._running:
+                try:
+                    image_meta = self.image_service.show(self.context,
+                                                         self.image_id)
+                    image_status = image_meta.get('status')
+                    if image_status == 'active':
+                        self.stop()
+                        self.done.send(True)
+                    # If the state is killed, then raise an exception.
+                    elif image_status == 'killed':
+                        self.stop()
+                        msg = (_("Glance image: %s is in killed state.") %
+                               self.image_id)
+                        LOG.error(msg)
+                        excep = exception.CinderException(msg)
+                        self.done.send_exception(excep)
+                    elif image_status in ['saving', 'queued']:
+                        greenthread.sleep(GLANCE_POLL_INTERVAL)
+                    else:
+                        self.stop()
+                        msg = _("Glance image %(id)s is in unknown state "
+                                "- %(state)s") % {'id': self.image_id,
+                                                  'state': image_status}
+                        LOG.error(msg)
+                        excep = exception.CinderException(msg)
+                        self.done.send_exception(excep)
+                except Exception as exc:
+                    self.stop()
+                    self.done.send_exception(exc)
+
+        greenthread.spawn(_inner)
+        return self.done
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
+
+    def close(self):
+        pass
+
+
+class IOThread(object):
+    """Class that reads chunks from the input file and writes them to the
+    output file till the transfer is completely done.
+    """
+
+    def __init__(self, input, output):
+        self.input = input
+        self.output = output
+        self._running = False
+        self.got_exception = False
+
+    def start(self):
+        self.done = event.Event()
+
+        def _inner():
+            """Read data from the input and write the same to the output."""
+            self._running = True
+            while self._running:
+                try:
+                    data = self.input.read(None)
+                    if not data:
+                        self.stop()
+                        self.done.send(True)
+                    self.output.write(data)
+                    greenthread.sleep(IO_THREAD_SLEEP_TIME)
+                except Exception as exc:
+                    self.stop()
+                    LOG.exception(exc)
+                    self.done.send_exception(exc)
+
+        greenthread.spawn(_inner)
+        return self.done
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
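
The three classes above form a bounded producer/consumer pipeline: one
IOThread (or the GlanceWriteThread) drains what the other side feeds into the
ThreadSafePipe. A stripped-down sketch of that pattern using the same
eventlet primitives, with toy data in place of real file handles:

    from eventlet import event, greenthread, queue

    pipe = queue.LightQueue(10)          # bounded buffer, like ThreadSafePipe
    done = event.Event()

    def reader():
        for chunk in ['aaaa', 'bbbb', '']:   # '' signals end of data
            pipe.put(chunk)

    def writer():
        while True:
            chunk = pipe.get()               # blocks until data is available
            if not chunk:
                done.send(True)              # transfer complete
                break

    greenthread.spawn(reader)
    greenthread.spawn(writer)
    done.wait()
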
diff --git a/cinder/volume/drivers/vmware/read_write_util.py b/cinder/volume/drivers/vmware/read_write_util.py
new file mode 100644 (file)
index 0000000..53939cf
--- /dev/null
@@ -0,0 +1,186 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Classes to handle image files.
+Collection of classes to handle image upload/download to/from Image service
+(like Glance image storage and retrieval service) from/to VMware server.
+"""
+
+import httplib
+import netaddr
+import urllib
+import urllib2
+import urlparse
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+USER_AGENT = 'OpenStack-ESX-Adapter'
+READ_CHUNKSIZE = 65536
+
+
+class GlanceFileRead(object):
+    """Glance file read handler class."""
+
+    def __init__(self, glance_read_iter):
+        self.glance_read_iter = glance_read_iter
+        self.iter = self.get_next()
+
+    def read(self, chunk_size):
+        """Read an item from the queue.
+
+        The chunk size is ignored for the Client ImageBodyIterator
+        uses its own CHUNKSIZE.
+        """
+        try:
+            return self.iter.next()
+        except StopIteration:
+            return ""
+
+    def get_next(self):
+        """Get the next item from the image iterator."""
+        for data in self.glance_read_iter:
+            yield data
+
+    def close(self):
+        """A dummy close just to maintain consistency."""
+        pass
+
+
+class VMwareHTTPFile(object):
+    """Base class for HTTP file."""
+
+    def __init__(self, file_handle):
+        self.eof = False
+        self.file_handle = file_handle
+
+    def set_eof(self, eof):
+        """Set the end of file marker."""
+        self.eof = eof
+
+    def get_eof(self):
+        """Check if the end of file has been reached."""
+        return self.eof
+
+    def close(self):
+        """Close the file handle."""
+        try:
+            self.file_handle.close()
+        except Exception as exc:
+            LOG.exception(exc)
+
+    def __del__(self):
+        """Close the file handle on garbage collection."""
+        self.close()
+
+    def _build_vim_cookie_headers(self, vim_cookies):
+        """Build ESX host session cookie headers."""
+        cookie_header = ""
+        for vim_cookie in vim_cookies:
+            cookie_header = vim_cookie.name + '=' + vim_cookie.value
+            break
+        return cookie_header
+
+    def write(self, data):
+        """Write data to the file."""
+        raise NotImplementedError()
+
+    def read(self, chunk_size):
+        """Read a chunk of data."""
+        raise NotImplementedError()
+
+    def get_size(self):
+        """Get size of the file to be read."""
+        raise NotImplementedError()
+
+    def _is_valid_ipv6(self, address):
+        """Whether given host address is a valid IPv6 address."""
+        try:
+            return netaddr.valid_ipv6(address)
+        except Exception:
+            return False
+
+    def get_soap_url(self, scheme, host):
+        """return IPv4/v6 compatible url constructed for host."""
+        if self._is_valid_ipv6(host):
+            return '%s://[%s]' % (scheme, host)
+        return '%s://%s' % (scheme, host)
+
+
+class VMwareHTTPWriteFile(VMwareHTTPFile):
+    """VMware file write handler class."""
+
+    def __init__(self, host, data_center_name, datastore_name, cookies,
+                 file_path, file_size, scheme='https'):
+        soap_url = self.get_soap_url(scheme, host)
+        base_url = '%s/folder/%s' % (soap_url, file_path)
+        param_list = {'dcPath': data_center_name, 'dsName': datastore_name}
+        base_url = base_url + '?' + urllib.urlencode(param_list)
+        _urlparse = urlparse.urlparse(base_url)
+        scheme, netloc, path, params, query, fragment = _urlparse
+        if scheme == 'http':
+            conn = httplib.HTTPConnection(netloc)
+        elif scheme == 'https':
+            conn = httplib.HTTPSConnection(netloc)
+        conn.putrequest('PUT', path + '?' + query)
+        conn.putheader('User-Agent', USER_AGENT)
+        conn.putheader('Content-Length', file_size)
+        conn.putheader('Cookie', self._build_vim_cookie_headers(cookies))
+        conn.endheaders()
+        self.conn = conn
+        VMwareHTTPFile.__init__(self, conn)
+
+    def write(self, data):
+        """Write to the file."""
+        self.file_handle.send(data)
+
+    def close(self):
+        """Get the response and close the connection."""
+        try:
+            self.conn.getresponse()
+        except Exception as excep:
+            LOG.debug(_("Exception during HTTP connection close in "
+                        "VMwareHTTPWrite. Exception is %s.") % excep)
+        super(VMwareHTTPWriteFile, self).close()
+
+
+class VMwareHTTPReadFile(VMwareHTTPFile):
+    """VMware file read handler class."""
+
+    def __init__(self, host, data_center_name, datastore_name, cookies,
+                 file_path, scheme='https'):
+        soap_url = self.get_soap_url(scheme, host)
+        base_url = '%s/folder/%s' % (soap_url, urllib.pathname2url(file_path))
+        param_list = {'dcPath': data_center_name, 'dsName': datastore_name}
+        base_url = base_url + '?' + urllib.urlencode(param_list)
+        headers = {'User-Agent': USER_AGENT,
+                   'Cookie': self._build_vim_cookie_headers(cookies)}
+        request = urllib2.Request(base_url, None, headers)
+        conn = urllib2.urlopen(request)
+        VMwareHTTPFile.__init__(self, conn)
+
+    def read(self, chunk_size):
+        """Read a chunk of data."""
+        # We ignore the chunk size passed in because we want the pipe to
+        # hold data items of the chunk size that the glance client uses
+        # for its reads while writing.
+        return self.file_handle.read(READ_CHUNKSIZE)
+
+    def get_size(self):
+        """Get size of the file to be read."""
+        return self.file_handle.headers.get('Content-Length', -1)
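
Both HTTP file classes talk to the ESX/VC host's '/folder' datastore access
API; the URL they end up requesting is easiest to see on concrete
(hypothetical) values:

    import urllib

    host = '10.20.30.40'    # hypothetical ESX/VC host
    params = urllib.urlencode({'dcPath': 'my-datacenter',
                               'dsName': 'datastore1'})
    url = 'https://%s/folder/%s?%s' % (
        host, urllib.pathname2url('volumes/volume-123-flat.vmdk'), params)
    # Roughly: https://10.20.30.40/folder/volumes/volume-123-flat.vmdk
    #          ?dcPath=my-datacenter&dsName=datastore1
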
index b8b04cca84e06d22db5e41f99354c08b04164bf2..a63d63bcebc847e94f9e6e8573b0e2a751b182d3 100644 (file)
@@ -28,6 +28,7 @@ from cinder.volume import driver
 from cinder.volume.drivers.vmware import api
 from cinder.volume.drivers.vmware import error_util
 from cinder.volume.drivers.vmware import vim
+from cinder.volume.drivers.vmware import vmware_images
 from cinder.volume.drivers.vmware import volumeops
 from cinder.volume import volume_types
 
@@ -63,7 +64,11 @@ vmdk_opts = [
     cfg.StrOpt('vmware_volume_folder',
                default='cinder-volumes',
                help='Name for the folder in the VC datacenter that will '
-                    'contain cinder volumes.')
+                    'contain cinder volumes.'),
+    cfg.IntOpt('vmware_image_transfer_timeout_secs',
+               default=7200,
+               help='Timeout in seconds for VMDK volume transfer between '
+                    'Cinder and Glance.'),
 ]
 
 
@@ -477,22 +482,20 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         :return: Reference to the cloned backing
         """
         src_path_name = self.volumeops.get_path_name(backing)
-        # If we have path like /vmfs/volumes/datastore/vm/vm.vmx
-        # we need to use /vmfs/volumes/datastore/vm/ are src_path
-        splits = src_path_name.split('/')
-        last_split = splits[len(splits) - 1]
-        src_path = src_path_name[:-len(last_split)]
+        (datastore_name,
+         folder_path, filename) = volumeops.split_datastore_path(src_path_name)
         # Pick a datastore where to create the full clone under same host
         host = self.volumeops.get_host(backing)
         (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
         (folder, summary) = self._get_folder_ds_summary(volume['size'],
                                                         resource_pool,
                                                         datastores)
-        dest_path = '[%s] %s' % (summary.name, volume['name'])
+        src_path = '[%s] %s' % (datastore_name, folder_path)
+        dest_path = '[%s] %s/' % (summary.name, volume['name'])
         # Copy source backing files to a destination location
         self.volumeops.copy_backing(src_path, dest_path)
         # Register the backing to the inventory
-        dest_path_name = '%s/%s' % (dest_path, last_split)
+        dest_path_name = '%s%s' % (dest_path, filename)
         clone = self.volumeops.register_backing(dest_path_name,
                                                 volume['name'], folder,
                                                 resource_pool)
@@ -572,6 +575,150 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         self._create_volume_from_snapshot(volume, snapshot)
 
+    def _get_ds_name_flat_vmdk_path(self, backing, vol_name):
+        """Get datastore name and folder path of the flat VMDK of the backing.
+
+        :param backing: Reference to the backing entity
+        :param vol_name: Name of the volume
+        :return: datastore name and relative path of the backing's flat VMDK
+        """
+        file_path_name = self.volumeops.get_path_name(backing)
+        (datastore_name,
+         folder_path, _) = volumeops.split_datastore_path(file_path_name)
+        flat_vmdk_path = '%s%s-flat.vmdk' % (folder_path, vol_name)
+        return (datastore_name, flat_vmdk_path)
+
+    @staticmethod
+    def _validate_disk_format(disk_format):
+        """Verify vmdk as disk format.
+
+        :param disk_format: Disk format of the image
+        """
+        if disk_format and disk_format.lower() != 'vmdk':
+            msg = _("Cannot create image of disk format: %s. Only vmdk "
+                    "disk format is accepted.") % disk_format
+            LOG.error(msg)
+            raise exception.ImageUnacceptable(msg)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        """Creates volume from image.
+
+        Creates a backing for the volume under the ESX/VC server and
+        copies the VMDK flat file from the glance image content.
+        The method supports only images with the VMDK disk format.
+
+        :param context: context
+        :param volume: Volume object
+        :param image_service: Glance image service
+        :param image_id: Glance image id
+        """
+        LOG.debug(_("Copy glance image: %s to create new volume.") % image_id)
+
+        # Verify glance image is vmdk disk format
+        metadata = image_service.show(context, image_id)
+        disk_format = metadata['disk_format']
+        VMwareEsxVmdkDriver._validate_disk_format(disk_format)
+
+        # Set volume size in GB from image metadata
+        volume['size'] = float(metadata['size']) / units.GiB
+        # First create empty backing in the inventory
+        backing = self._create_backing_in_inventory(volume)
+
+        try:
+            (datastore_name,
+             flat_vmdk_path) = self._get_ds_name_flat_vmdk_path(backing,
+                                                                volume['name'])
+            host = self.volumeops.get_host(backing)
+            datacenter = self.volumeops.get_dc(host)
+            datacenter_name = self.volumeops.get_entity_name(datacenter)
+            flat_vmdk_ds_path = '[%s] %s' % (datastore_name, flat_vmdk_path)
+            # Delete the *-flat.vmdk file within the backing
+            self.volumeops.delete_file(flat_vmdk_ds_path, datacenter)
+
+            # copy over image from glance into *-flat.vmdk
+            timeout = self.configuration.vmware_image_transfer_timeout_secs
+            host_ip = self.configuration.vmware_host_ip
+            cookies = self.session.vim.client.options.transport.cookiejar
+            LOG.debug(_("Fetching glance image: %(id)s to server: %(host)s.") %
+                      {'id': image_id, 'host': host_ip})
+            vmware_images.fetch_image(context, timeout, image_service,
+                                      image_id, host=host_ip,
+                                      data_center_name=datacenter_name,
+                                      datastore_name=datastore_name,
+                                      cookies=cookies,
+                                      file_path=flat_vmdk_path)
+            LOG.info(_("Done copying image: %(id)s to volume: %(vol)s.") %
+                     {'id': image_id, 'vol': volume['name']})
+        except Exception as excep:
+            LOG.exception(_("Exception in copy_image_to_volume: %(excep)s. "
+                            "Deleting the backing: %(back)s.") %
+                          {'excep': excep, 'back': backing})
+            # delete the backing
+            self.volumeops.delete_backing(backing)
+            raise excep
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        """Creates glance image from volume.
+
+        Steps followed are:
+
+        1. Get the name of the vmdk file which the volume points to right now.
+           Can be a chain of snapshots, so we need to know the last in the
+           chain.
+        2. Create the snapshot. A new vmdk is created which the volume points
+           to now. The earlier vmdk becomes read-only.
+        3. Call CopyVirtualDisk, which coalesces the disk chain to form a
+           single vmdk, or rather a .vmdk metadata file and a -flat.vmdk
+           disk data file.
+        4. Now upload the -flat.vmdk file to the image store.
+        5. Delete the coalesced .vmdk and -flat.vmdk created.
+        """
+        LOG.debug(_("Copy Volume: %s to new image.") % volume['name'])
+        VMwareEsxVmdkDriver._validate_disk_format(image_meta['disk_format'])
+
+        backing = self.volumeops.get_backing(volume['name'])
+        if not backing:
+            LOG.info(_("Backing not found, creating for volume: %s") %
+                     volume['name'])
+            backing = self._create_backing_in_inventory(volume)
+
+        vmdk_file_path = self.volumeops.get_vmdk_path(backing)
+        datastore_name = volumeops.split_datastore_path(vmdk_file_path)[0]
+
+        # Create a snapshot
+        image_id = image_meta['id']
+        snapshot_name = "snapshot-%s" % image_id
+        self.volumeops.create_snapshot(backing, snapshot_name, None, True)
+
+        # Create a copy of the snapshotted vmdk into a tmp file
+        tmp_vmdk_file_path = '[%s] %s.vmdk' % (datastore_name, image_id)
+        host = self.volumeops.get_host(backing)
+        datacenter = self.volumeops.get_dc(host)
+        self.volumeops.copy_vmdk_file(datacenter, vmdk_file_path,
+                                      tmp_vmdk_file_path)
+        try:
+            # Upload image from copy of -flat.vmdk
+            timeout = self.configuration.vmware_image_transfer_timeout_secs
+            host_ip = self.configuration.vmware_host_ip
+            datacenter_name = self.volumeops.get_entity_name(datacenter)
+            cookies = self.session.vim.client.options.transport.cookiejar
+            flat_vmdk_copy = '%s-flat.vmdk' % image_id
+
+            vmware_images.upload_image(context, timeout, image_service,
+                                       image_meta['id'],
+                                       volume['project_id'], host=host_ip,
+                                       data_center_name=datacenter_name,
+                                       datastore_name=datastore_name,
+                                       cookies=cookies,
+                                       file_path=flat_vmdk_copy,
+                                       snapshot_name=image_meta['name'],
+                                       image_version=1)
+            LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") %
+                     {'vol': volume['name'], 'img': image_meta['name']})
+        finally:
+            # Delete the coalesced .vmdk and -flat.vmdk created
+            self.volumeops.delete_vmdk_file(tmp_vmdk_file_path, datacenter)
+
 
 class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
     """Manage volumes on VMware VC server."""
diff --git a/cinder/volume/drivers/vmware/vmware_images.py b/cinder/volume/drivers/vmware/vmware_images.py
new file mode 100644 (file)
index 0000000..98fa5d6
--- /dev/null
@@ -0,0 +1,135 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Utility functions for Image transfer.
+"""
+
+from eventlet import timeout
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.vmware import io_util
+from cinder.volume.drivers.vmware import read_write_util as rw_util
+
+LOG = logging.getLogger(__name__)
+
+QUEUE_BUFFER_SIZE = 10
+
+
+def start_transfer(context, timeout_secs, read_file_handle, data_size,
+                   write_file_handle=None, image_service=None, image_id=None,
+                   image_meta=None):
+    """Start the data transfer from the reader to the writer.
+
+    The reader writes to the pipe and the writer reads from it. This means
+    the total transfer time is bounded by the slower of the read and the
+    write, not by their sum.
+    """
+
+    if not image_meta:
+        image_meta = {}
+
+    # The pipe that acts as an intermediate store of data for the reader to
+    # write to and the writer to read from.
+    thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
+    # The read thread. In the case of glance it is an instance of the
+    # GlanceFileRead class. The glance client read returns an iterator
+    # and this class wraps that iterator to provide data chunks in calls
+    # to read.
+    read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
+
+    # For a Glance -> VMware transfer, we just need a handle to the HTTP
+    # connection that sends the transfer data to the VMware datastore.
+    if write_file_handle:
+        write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
+    # For a VMware -> Glance transfer, we relinquish the VMware HTTP file
+    # read handle to the glance client instance, but to be sure the transfer
+    # completed we need to wait for the image status on glance to change to
+    # 'active'. The GlanceWriteThread handles this for us.
+    elif image_service and image_id:
+        write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
+                                                 image_service, image_id,
+                                                 image_meta)
+    # Start the read and write threads.
+    read_event = read_thread.start()
+    write_event = write_thread.start()
+    timer = timeout.Timeout(timeout_secs)
+    try:
+        # Wait on the read and write events to signal their end
+        read_event.wait()
+        write_event.wait()
+    except (timeout.Timeout, Exception) as exc:
+        # If either the read or the write raises an exception, stop both
+        # threads so that we don't unnecessarily keep the other one waiting.
+        read_thread.stop()
+        write_thread.stop()
+
+        # Log and raise the exception.
+        LOG.exception(exc)
+        raise exception.CinderException(exc)
+    finally:
+        timer.cancel()
+        # No matter what, try closing the read and write handles where
+        # applicable.
+        read_file_handle.close()
+        if write_file_handle:
+            write_file_handle.close()
+
+
+def fetch_image(context, timeout_secs, image_service, image_id, **kwargs):
+    """Download image from the glance image server."""
+    LOG.debug(_("Downloading image: %s from glance image server.") % image_id)
+    metadata = image_service.show(context, image_id)
+    file_size = int(metadata['size'])
+    read_iter = image_service.download(context, image_id)
+    read_handle = rw_util.GlanceFileRead(read_iter)
+    write_handle = rw_util.VMwareHTTPWriteFile(kwargs.get('host'),
+                                               kwargs.get('data_center_name'),
+                                               kwargs.get('datastore_name'),
+                                               kwargs.get('cookies'),
+                                               kwargs.get('file_path'),
+                                               file_size)
+    start_transfer(context, timeout_secs, read_handle, file_size,
+                   write_file_handle=write_handle)
+    LOG.info(_("Downloaded image: %s from glance image server.") % image_id)
+
+
+def upload_image(context, timeout_secs, image_service, image_id, owner_id,
+                 **kwargs):
+    """Upload the snapshot vm disk file to Glance image server."""
+    LOG.debug(_("Uploading image: %s to the Glance image server.") % image_id)
+    read_handle = rw_util.VMwareHTTPReadFile(kwargs.get('host'),
+                                             kwargs.get('data_center_name'),
+                                             kwargs.get('datastore_name'),
+                                             kwargs.get('cookies'),
+                                             kwargs.get('file_path'))
+    file_size = read_handle.get_size()
+    # The properties and other fields that we need to set for the image.
+    image_metadata = {'disk_format': 'vmdk',
+                      'is_public': 'false',
+                      'name': kwargs.get('snapshot_name'),
+                      'status': 'active',
+                      'container_format': 'bare',
+                      'size': file_size,
+                      'properties': {'vmware_image_version':
+                                     kwargs.get('image_version'),
+                                     'owner_id': owner_id}}
+    start_transfer(context, timeout_secs, read_handle, file_size,
+                   image_service=image_service, image_id=image_id,
+                   image_meta=image_metadata)
+    LOG.info(_("Uploaded image: %s to the Glance image server.") % image_id)
index 348af8a89229a5c29ea7e770ddfedb6dabcdd6d4..aee517c31e4c3f3afecc3dbfeaefdf86cbf110a3 100644 (file)
@@ -30,6 +30,34 @@ ALREADY_EXISTS = 'AlreadyExists'
 FILE_ALREADY_EXISTS = 'FileAlreadyExists'
 
 
+def split_datastore_path(datastore_path):
+    """Split the datastore path to components.
+
+    return the datastore name, relative folder path and the file name
+
+    E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
+    (datastore1, my_volume/, my_volume.vmdk)
+
+    :param datastore_path: Datastore path of a file
+    :return: Parsed datastore name, relative folder path and file name
+    """
+    splits = datastore_path.split('[', 1)[1].split(']', 1)
+    datastore_name = None
+    folder_path = None
+    file_name = None
+    if len(splits) == 1:
+        datastore_name = splits[0]
+    else:
+        datastore_name, path = splits
+        # Path will be of the form my_volume/my_volume.vmdk;
+        # we need to split it into my_volume/ and my_volume.vmdk
+        splits = path.split('/')
+        file_name = splits[len(splits) - 1]
+        folder_path = path[:-len(file_name)]
+
+    return (datastore_name.strip(), folder_path.strip(), file_name.strip())
+
+
 class VMwareVolumeOps(object):
     """Manages volume operations."""
 
@@ -174,11 +202,7 @@ class VMwareVolumeOps(object):
         for child_entity in child_entities:
             if child_entity._type != 'Folder':
                 continue
-            child_entity_name = self._session.invoke_api(vim_util,
-                                                         'get_object_property',
-                                                         self._session.vim,
-                                                         child_entity,
-                                                         'name')
+            child_entity_name = self.get_entity_name(child_entity)
             if child_entity_name == child_folder_name:
                 LOG.debug(_("Child folder already present: %s.") %
                           child_entity)
@@ -210,7 +234,7 @@ class VMwareVolumeOps(object):
         controller_spec.device = controller_device
 
         disk_device = cf.create('ns0:VirtualDisk')
-        disk_device.capacityInKB = size_kb
+        disk_device.capacityInKB = int(size_kb)
         disk_device.key = -101
         disk_device.unitNumber = 0
         disk_device.controllerKey = -100
@@ -358,12 +382,13 @@ class VMwareVolumeOps(object):
         LOG.info(_("Successfully moved volume backing: %(backing)s into the "
                    "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
 
-    def create_snapshot(self, backing, name, description):
+    def create_snapshot(self, backing, name, description, quiesce=False):
         """Create snapshot of the backing with given name and description.
 
         :param backing: Reference to the backing entity
         :param name: Snapshot name
         :param description: Snapshot description
+        :param quiesce: Whether to quiesce the backing when taking snapshot
         :return: Created snapshot entity reference
         """
         LOG.debug(_("Snapshoting backing: %(backing)s with name: %(name)s.") %
@@ -372,7 +397,7 @@ class VMwareVolumeOps(object):
                                         'CreateSnapshot_Task',
                                         backing, name=name,
                                         description=description,
-                                        memory=False, quiesce=False)
+                                        memory=False, quiesce=quiesce)
         LOG.debug(_("Initiated snapshot of volume backing: %(backing)s "
                     "named: %(name)s.") % {'backing': backing, 'name': name})
         task_info = self._session.wait_for_task(task)
@@ -505,7 +530,7 @@ class VMwareVolumeOps(object):
         LOG.info(_("Successfully created clone: %s.") % new_backing)
         return new_backing
 
-    def _delete_file(self, file_path, datacenter=None):
+    def delete_file(self, file_path, datacenter=None):
         """Delete file or folder on the datastore.
 
         :param file_path: Datastore path of the file or folder
@@ -549,7 +574,7 @@ class VMwareVolumeOps(object):
                 raise excep
             # There might be files on datastore due to previous failed attempt
             # We clean the folder up and retry the copy
-            self._delete_file(dest_folder_path)
+            self.delete_file(dest_folder_path)
             self.copy_backing(src_folder_path, dest_folder_path)
 
     def get_path_name(self, backing):
@@ -604,3 +629,76 @@ class VMwareVolumeOps(object):
         LOG.debug(_("Initiated reverting snapshot via task: %s.") % task)
         self._session.wait_for_task(task)
         LOG.info(_("Successfully reverted to snapshot: %s.") % snapshot)
+
+    def get_entity_name(self, entity):
+        """Get name of the managed entity.
+
+        :param entity: Reference to the entity
+        :return: Name of the managed entity
+        """
+        return self._session.invoke_api(vim_util, 'get_object_property',
+                                        self._session.vim, entity, 'name')
+
+    def get_vmdk_path(self, backing):
+        """Get the vmdk file name of the backing.
+
+        The vmdk file path of the backing returned is of the form:
+        "[datastore1] my_folder/my_vm.vmdk"
+
+        :param backing: Reference to the backing
+        :return: VMDK file path of the backing
+        """
+        hardware_devices = self._session.invoke_api(vim_util,
+                                                    'get_object_property',
+                                                    self._session.vim,
+                                                    backing,
+                                                    'config.hardware.device')
+        if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
+            hardware_devices = hardware_devices.VirtualDevice
+        for device in hardware_devices:
+            if device.__class__.__name__ == "VirtualDisk":
+                bkng = device.backing
+                if bkng.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo":
+                    return bkng.fileName
+
+    def copy_vmdk_file(self, dc_ref, src_vmdk_file_path, dest_vmdk_file_path):
+        """Copy contents of the src vmdk file to dest vmdk file.
+
+        During the copy also coalesce snapshots of src if present.
+        dest_vmdk_file_path will be created if not already present.
+
+        :param dc_ref: Reference to datacenter containing src and dest
+        :param src_vmdk_file_path: Source vmdk file path
+        :param dest_vmdk_file_path: Destination vmdk file path
+        """
+        LOG.debug(_('Copying disk data of the snapshotted backing.'))
+        diskMgr = self._session.vim.service_content.virtualDiskManager
+        task = self._session.invoke_api(self._session.vim,
+                                        'CopyVirtualDisk_Task',
+                                        diskMgr,
+                                        sourceName=src_vmdk_file_path,
+                                        sourceDatacenter=dc_ref,
+                                        destName=dest_vmdk_file_path,
+                                        destDatacenter=dc_ref,
+                                        force=True)
+        LOG.debug(_("Initiated copying disk data via task: %s.") % task)
+        self._session.wait_for_task(task)
+        LOG.info(_("Successfully copied disk data to: %s.") %
+                 dest_vmdk_file_path)
+
+    def delete_vmdk_file(self, vmdk_file_path, dc_ref):
+        """Delete given vmdk files.
+
+        :param vmdk_file_path: VMDK file path to be deleted
+        :param dc_ref: Reference to datacenter that contains this VMDK file
+        """
+        LOG.debug(_("Deleting vmdk file: %s.") % vmdk_file_path)
+        diskMgr = self._session.vim.service_content.virtualDiskManager
+        task = self._session.invoke_api(self._session.vim,
+                                        'DeleteVirtualDisk_Task',
+                                        diskMgr,
+                                        name=vmdk_file_path,
+                                        datacenter=dc_ref)
+        LOG.debug(_("Initiated deleting vmdk file via task: %s.") % task)
+        self._session.wait_for_task(task)
+        LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path)
index c4d58806100f87ee5d31ba5b890d17a74b75807c..36d6de7a6f329d4e6949bcd70a78b490fdddeaa8 100644 (file)
 #db_driver=cinder.db
 
 
+#
+# Options defined in cinder.image.glance
+#
+
+# A list of url schemes that can be downloaded directly via
+# the direct_url.  Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
 #
 # Options defined in cinder.image.image_utils
 #
 # cinder volumes. (string value)
 #vmware_volume_folder=cinder-volumes
 
+# Timeout in seconds for VMDK volume transfer between Cinder
+# and Glance. (integer value)
+#vmware_image_transfer_timeout_secs=7200
+
 
 #
 # Options defined in cinder.volume.drivers.windows.windows
 #volume_dd_blocksize=1M
 
 
-# Total option count: 364
+# Total option count: 366
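
Taken together, an operator enabling the direct file download path and a
longer transfer window would set something like the following in cinder.conf
(illustrative values; the defaults are shown in the sample above):

    [DEFAULT]
    allowed_direct_url_schemes = file
    vmware_image_transfer_timeout_secs = 14400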