--- /dev/null
+++ b/cinder/tests/test_vmware_vmdk.py
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMware VMDK driver.
+"""
+
+import mox
+
+from cinder import exception
+from cinder import test
+from cinder import units
+from cinder.volume import configuration
+from cinder.volume.drivers.vmware import api
+from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import vim_util
+from cinder.volume.drivers.vmware import vmdk
+from cinder.volume.drivers.vmware import volumeops
+
+
+class FakeVim(object):
+ @property
+ def service_content(self):
+ return mox.MockAnything()
+
+ def Login(self, session_manager, userName, password):
+ return mox.MockAnything()
+
+
+class FakeTaskInfo(object):
+ def __init__(self, state, result=None):
+ self.state = state
+ self.result = result
+
+        class FakeError(object):
+            def __init__(self):
+                self.localizedMessage = None
+
+        self.error = FakeError()
+
+
+class FakeMor(object):
+ def __init__(self, type, val):
+ self._type = type
+ self.value = val
+
+
+class FakeObject(object):
+    def __init__(self):
+        # Use a per-instance dict so state does not leak between fakes.
+        self._fields = {}
+
+    def __setitem__(self, key, value):
+        self._fields[key] = value
+
+    def __getitem__(self, item):
+        return self._fields[item]
+
+
+class FakeManagedObjectReference(object):
+    def __init__(self, lis=None):
+        self.ManagedObjectReference = lis if lis is not None else []
+
+
+class FakeDatastoreSummary(object):
+ def __init__(self, freeSpace, capacity, datastore=None, name=None):
+ self.freeSpace = freeSpace
+ self.capacity = capacity
+ self.datastore = datastore
+ self.name = name
+
+
+class FakeSnapshotTree(object):
+ def __init__(self, tree=None, name=None,
+ snapshot=None, childSnapshotList=None):
+ self.rootSnapshotList = tree
+ self.name = name
+ self.snapshot = snapshot
+ self.childSnapshotList = childSnapshotList
+
+
+class VMwareEsxVmdkDriverTestCase(test.TestCase):
+ """Test class for VMwareEsxVmdkDriver."""
+
+ IP = 'localhost'
+ USERNAME = 'username'
+ PASSWORD = 'password'
+ VOLUME_FOLDER = 'cinder-volumes'
+ API_RETRY_COUNT = 3
+ TASK_POLL_INTERVAL = 5.0
+
+ def setUp(self):
+ super(VMwareEsxVmdkDriverTestCase, self).setUp()
+ self._config = mox.MockObject(configuration.Configuration)
+ self._config.append_config_values(mox.IgnoreArg())
+ self._config.vmware_host_ip = self.IP
+ self._config.vmware_host_username = self.USERNAME
+ self._config.vmware_host_password = self.PASSWORD
+ self._config.vmware_wsdl_location = None
+ self._config.vmware_volume_folder = self.VOLUME_FOLDER
+ self._config.vmware_api_retry_count = self.API_RETRY_COUNT
+ self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
+ self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config)
+        api_retry_count = self._config.vmware_api_retry_count
+        task_poll_interval = self._config.vmware_task_poll_interval
+ self._session = api.VMwareAPISession(self.IP, self.USERNAME,
+ self.PASSWORD, api_retry_count,
+ task_poll_interval,
+ create_session=False)
+ self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self._vim = FakeVim()
+
+ def test_retry(self):
+ """Test Retry."""
+
+ class TestClass(object):
+
+ def __init__(self):
+ self.counter1 = 0
+
+            @api.Retry(max_retry_count=2, inc_sleep_time=0.001,
+                       exceptions=(Exception,))
+ def fail(self):
+ self.counter1 += 1
+ raise exception.CinderException('Fail')
+
+ test_obj = TestClass()
+ self.assertRaises(exception.CinderException, test_obj.fail)
+ self.assertEquals(test_obj.counter1, 3)
+
+ def test_create_session(self):
+ """Test create_session."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.ReplayAll()
+ self._session.create_session()
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_do_setup(self):
+ """Test do_setup."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'session')
+ self._driver.session = self._session
+ m.ReplayAll()
+ self._driver.do_setup(mox.IgnoreArg())
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_check_for_setup_error(self):
+ """Test check_for_setup_error."""
+ self._driver.check_for_setup_error()
+
+ def test_get_volume_stats(self):
+ """Test get_volume_stats."""
+ stats = self._driver.get_volume_stats()
+ self.assertEquals(stats['vendor_name'], 'VMware')
+ self.assertEquals(stats['driver_version'], '1.0')
+ self.assertEquals(stats['storage_protocol'], 'LSI Logic SCSI')
+ self.assertEquals(stats['reserved_percentage'], 0)
+ self.assertEquals(stats['total_capacity_gb'], 'unknown')
+ self.assertEquals(stats['free_capacity_gb'], 'unknown')
+
+ def test_create_volume(self):
+ """Test create_volume."""
+ self._driver.create_volume(mox.IgnoreArg())
+
+ def test_success_wait_for_task(self):
+ """Test successful wait_for_task."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ result = FakeMor('VirtualMachine', 'my_vm')
+ success_task_info = FakeTaskInfo('success', result=result)
+ m.StubOutWithMock(vim_util, 'get_object_property')
+ vim_util.get_object_property(self._session.vim,
+ mox.IgnoreArg(),
+ 'info').AndReturn(success_task_info)
+
+ m.ReplayAll()
+ ret = self._session.wait_for_task(mox.IgnoreArg())
+ self.assertEquals(ret.result, result)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_failed_wait_for_task(self):
+ """Test failed wait_for_task."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ failed_task_info = FakeTaskInfo('failed')
+ m.StubOutWithMock(vim_util, 'get_object_property')
+ vim_util.get_object_property(self._session.vim,
+ mox.IgnoreArg(),
+ 'info').AndReturn(failed_task_info)
+
+ m.ReplayAll()
+ self.assertRaises(error_util.VimFaultException,
+ self._session.wait_for_task,
+ mox.IgnoreArg())
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_backing(self):
+ """Test get_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ self._session.invoke_api(vim_util, 'get_objects',
+ self._vim, 'VirtualMachine').AndReturn([])
+
+ m.ReplayAll()
+ self._volumeops.get_backing(mox.IgnoreArg())
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_backing(self):
+ """Test delete_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ backing = FakeMor('VirtualMachine', 'my_vm')
+ self._session.invoke_api(self._vim, 'Destroy_Task', backing)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(mox.IgnoreArg())
+
+ m.ReplayAll()
+ self._volumeops.delete_backing(backing)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_volume_without_backing(self):
+ """Test delete_volume without backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ self._volumeops.get_backing('hello_world').AndReturn(None)
+
+ m.ReplayAll()
+ volume = FakeObject()
+ volume['name'] = 'hello_world'
+ self._driver.delete_volume(volume)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_volume_with_backing(self):
+ """Test delete_volume with backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+
+ backing = FakeMor('VirtualMachine', 'my_vm')
+
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ m.StubOutWithMock(self._volumeops, 'delete_backing')
+ self._volumeops.get_backing('hello_world').AndReturn(backing)
+ self._volumeops.delete_backing(backing)
+
+ m.ReplayAll()
+ volume = FakeObject()
+ volume['name'] = 'hello_world'
+ self._driver.delete_volume(volume)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_export(self):
+ """Test create_export."""
+ self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg())
+
+ def test_ensure_export(self):
+ """Test ensure_export."""
+ self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg())
+
+ def test_remove_export(self):
+ """Test remove_export."""
+ self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg())
+
+ def test_terminate_connection(self):
+ """Test terminate_connection."""
+ self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(),
+ force=mox.IgnoreArg())
+
+ def test_get_host(self):
+ """Test get_host."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ instance = FakeObject()
+ self._session.invoke_api(vim_util, 'get_object_property',
+ self._vim, instance, 'runtime.host')
+
+ m.ReplayAll()
+ self._volumeops.get_host(instance)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_hosts(self):
+ """Test get_hosts."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ self._session.invoke_api(vim_util, 'get_objects',
+ self._vim, 'HostSystem')
+
+ m.ReplayAll()
+ self._volumeops.get_hosts()
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_dss_rp(self):
+ """Test get_dss_rp."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ host = FakeObject()
+ self._session.invoke_api(vim_util, 'get_object_properties',
+ self._vim, host,
+ ['datastore', 'parent']).AndReturn([])
+ self._session.invoke_api(vim_util, 'get_object_property',
+ self._vim, mox.IgnoreArg(), 'resourcePool')
+
+ m.ReplayAll()
+ self.assertRaises(error_util.VimException, self._volumeops.get_dss_rp,
+ host)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_parent(self):
+ """Test get_parent."""
+ # Not recursive
+ child = FakeMor('Parent', 'my_parent')
+ parent = self._volumeops._get_parent(child, 'Parent')
+ self.assertEquals(parent, child)
+
+ # Recursive
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ parent = FakeMor('Parent', 'my_parent1')
+ child = FakeMor('Child', 'my_child')
+ self._session.invoke_api(vim_util, 'get_object_property', self._vim,
+ child, 'parent').AndReturn(parent)
+
+ m.ReplayAll()
+ ret = self._volumeops._get_parent(child, 'Parent')
+ self.assertEquals(ret, parent)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_dc(self):
+ """Test get_dc."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._volumeops, '_get_parent')
+ self._volumeops._get_parent(mox.IgnoreArg(), 'Datacenter')
+
+ m.ReplayAll()
+ self._volumeops.get_dc(mox.IgnoreArg())
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_vmfolder(self):
+ """Test get_vmfolder."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ datacenter = FakeMor('Datacenter', 'my_dc')
+ self._session.invoke_api(vim_util, 'get_object_property', self._vim,
+ datacenter, 'vmFolder')
+
+ m.ReplayAll()
+        self._volumeops.get_vmfolder(datacenter)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_backing(self):
+ """Test create_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ folder = FakeMor('Folder', 'my_fol')
+ resource_pool = FakeMor('ResourcePool', 'my_rs')
+ host = FakeMor('HostSystem', 'my_host')
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'CreateVM_Task', folder,
+ config=mox.IgnoreArg(), pool=resource_pool,
+ host=host).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ task_info = FakeTaskInfo('success', mox.IgnoreArg())
+ self._session.wait_for_task(task).AndReturn(task_info)
+ name = 'my_vm'
+ size_kb = 1 * units.MiB
+ disk_type = 'thick'
+ ds_name = 'my_ds'
+ m.StubOutWithMock(self._volumeops, '_get_create_spec')
+ self._volumeops._get_create_spec(name, size_kb, disk_type, ds_name)
+
+ m.ReplayAll()
+ self._volumeops.create_backing(name, size_kb, disk_type, folder,
+ resource_pool, host, ds_name)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_datastore(self):
+ """Test get_datastore."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ datastore = FakeMor('Datastore', 'my_ds')
+ datastores = FakeManagedObjectReference([datastore])
+ self._session.invoke_api(vim_util, 'get_object_property', self._vim,
+ backing, 'datastore').AndReturn(datastores)
+
+ m.ReplayAll()
+ result = self._volumeops.get_datastore(backing)
+ self.assertEquals(result, datastore)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_summary(self):
+ """Test get_summary."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ datastore = FakeMor('Datastore', 'my_ds')
+ self._session.invoke_api(vim_util, 'get_object_property', self._vim,
+ datastore, 'summary')
+
+ m.ReplayAll()
+ self._volumeops.get_summary(datastore)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_init_conn_with_instance_and_backing(self):
+ """Test initialize_connection with instance and backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['id'] = 'volume_id'
+ volume['size'] = 1
+ connector = {'instance': 'my_instance'}
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_backing(volume['name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'get_host')
+ host = FakeMor('HostSystem', 'my_host')
+ self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
+
+ m.ReplayAll()
+ conn_info = self._driver.initialize_connection(volume, connector)
+ self.assertEquals(conn_info['driver_volume_type'], 'vmdk')
+ self.assertEquals(conn_info['data']['volume'], 'my_back')
+ self.assertEquals(conn_info['data']['volume_id'], 'volume_id')
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_volume_group_folder(self):
+ """Test _get_volume_group_folder."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ datacenter = FakeMor('Datacenter', 'my_dc')
+ m.StubOutWithMock(self._volumeops, 'get_vmfolder')
+ self._volumeops.get_vmfolder(datacenter)
+
+ m.ReplayAll()
+ self._driver._get_volume_group_folder(datacenter)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_select_datastore_summary(self):
+ """Test _select_datastore_summary."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ datastore1 = FakeMor('Datastore', 'my_ds_1')
+ datastore2 = FakeMor('Datastore', 'my_ds_2')
+ datastore3 = FakeMor('Datastore', 'my_ds_3')
+ datastore4 = FakeMor('Datastore', 'my_ds_4')
+ datastores = [datastore1, datastore2, datastore3, datastore4]
+ m.StubOutWithMock(self._volumeops, 'get_summary')
+ summary1 = FakeDatastoreSummary(10, 10)
+ summary2 = FakeDatastoreSummary(25, 50)
+ summary3 = FakeDatastoreSummary(50, 50)
+ summary4 = FakeDatastoreSummary(100, 100)
+ moxd = self._volumeops.get_summary(datastore1)
+ moxd.MultipleTimes().AndReturn(summary1)
+ moxd = self._volumeops.get_summary(datastore2)
+ moxd.MultipleTimes().AndReturn(summary2)
+ moxd = self._volumeops.get_summary(datastore3)
+ moxd.MultipleTimes().AndReturn(summary3)
+ moxd = self._volumeops.get_summary(datastore4)
+ moxd.MultipleTimes().AndReturn(summary4)
+
+ m.ReplayAll()
+ summary = self._driver._select_datastore_summary(1, datastores)
+ self.assertEquals(summary, summary1)
+ summary = self._driver._select_datastore_summary(10, datastores)
+ self.assertEquals(summary, summary3)
+ summary = self._driver._select_datastore_summary(50, datastores)
+ self.assertEquals(summary, summary4)
+ self.assertRaises(error_util.VimException,
+ self._driver._select_datastore_summary,
+ 100, datastores)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_folder_ds_summary(self):
+ """Test _get_folder_ds_summary."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ size = 1
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ datacenter = FakeMor('Datacenter', 'my_dc')
+ m.StubOutWithMock(self._volumeops, 'get_dc')
+ self._volumeops.get_dc(resource_pool).AndReturn(datacenter)
+ m.StubOutWithMock(self._driver, '_get_volume_group_folder')
+ folder = FakeMor('Folder', 'my_fol')
+ self._driver._get_volume_group_folder(datacenter).AndReturn(folder)
+ m.StubOutWithMock(self._driver, '_select_datastore_summary')
+ datastores = [FakeMor('Datastore', 'my_ds')]
+ self._driver._select_datastore_summary(size * units.GiB, datastores)
+
+ m.ReplayAll()
+ self._driver._get_folder_ds_summary(size, resource_pool, datastores)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_disk_type(self):
+ """Test _get_disk_type."""
+ volume = FakeObject()
+ volume['volume_type_id'] = None
+ self.assertEquals(vmdk.VMwareEsxVmdkDriver._get_disk_type(volume),
+ 'thin')
+
+ def test_init_conn_with_instance_no_backing(self):
+ """Test initialize_connection with instance and without backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['id'] = 'volume_id'
+ volume['size'] = 1
+ volume['volume_type_id'] = None
+ connector = {'instance': 'my_instance'}
+ self._volumeops.get_backing(volume['name'])
+ m.StubOutWithMock(self._volumeops, 'get_host')
+ host = FakeMor('HostSystem', 'my_host')
+ self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
+ m.StubOutWithMock(self._volumeops, 'get_dss_rp')
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ datastores = [FakeMor('Datastore', 'my_ds')]
+ self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool))
+ m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
+ folder = FakeMor('Folder', 'my_fol')
+ summary = FakeDatastoreSummary(1, 1)
+ self._driver._get_folder_ds_summary(volume['size'], resource_pool,
+ datastores).AndReturn((folder,
+ summary))
+ backing = FakeMor('VirtualMachine', 'my_back')
+ m.StubOutWithMock(self._volumeops, 'create_backing')
+ self._volumeops.create_backing(volume['name'],
+ volume['size'] * units.MiB,
+ mox.IgnoreArg(), folder,
+ resource_pool, host,
+ mox.IgnoreArg()).AndReturn(backing)
+
+ m.ReplayAll()
+ conn_info = self._driver.initialize_connection(volume, connector)
+ self.assertEquals(conn_info['driver_volume_type'], 'vmdk')
+ self.assertEquals(conn_info['data']['volume'], 'my_back')
+ self.assertEquals(conn_info['data']['volume_id'], 'volume_id')
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_init_conn_without_instance(self):
+ """Test initialize_connection without instance and a backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['id'] = 'volume_id'
+ connector = {}
+ self._volumeops.get_backing(volume['name']).AndReturn(backing)
+
+ m.ReplayAll()
+ conn_info = self._driver.initialize_connection(volume, connector)
+ self.assertEquals(conn_info['driver_volume_type'], 'vmdk')
+ self.assertEquals(conn_info['data']['volume'], 'my_back')
+ self.assertEquals(conn_info['data']['volume_id'], 'volume_id')
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_snapshot_operation(self):
+ """Test volumeops.create_snapshot."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ name = 'snapshot_name'
+ description = 'snapshot_desc'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'CreateSnapshot_Task', backing,
+ name=name, description=description,
+ memory=False, quiesce=False).AndReturn(task)
+ result = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ success_task_info = FakeTaskInfo('success', result=result)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task).AndReturn(success_task_info)
+
+ m.ReplayAll()
+ self._volumeops.create_snapshot(backing, name, description)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_snapshot_without_backing(self):
+ """Test vmdk.create_snapshot without backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ self._volumeops.get_backing(snapshot['volume_name'])
+
+ m.ReplayAll()
+ self._driver.create_snapshot(snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_snapshot_with_backing(self):
+ """Test vmdk.create_snapshot with backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ snapshot['name'] = 'snapshot_name'
+ snapshot['display_description'] = 'snapshot_desc'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'create_snapshot')
+ self._volumeops.create_snapshot(backing, snapshot['name'],
+ snapshot['display_description'])
+
+ m.ReplayAll()
+ self._driver.create_snapshot(snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_snapshot_from_tree(self):
+ """Test _get_snapshot_from_tree."""
+ volops = volumeops.VMwareVolumeOps
+ ret = volops._get_snapshot_from_tree(mox.IgnoreArg(), None)
+ self.assertEquals(ret, None)
+ name = 'snapshot_name'
+ snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ root = FakeSnapshotTree(name='snapshot_name', snapshot=snapshot)
+ ret = volops._get_snapshot_from_tree(name, root)
+ self.assertEquals(ret, snapshot)
+ snapshot1 = FakeMor('VirtualMachineSnapshot', 'my_snap_1')
+ root = FakeSnapshotTree(name='snapshot_name_1', snapshot=snapshot1,
+ childSnapshotList=[root])
+ ret = volops._get_snapshot_from_tree(name, root)
+ self.assertEquals(ret, snapshot)
+
+ def test_get_snapshot(self):
+ """Test get_snapshot."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ name = 'snapshot_name'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ root = FakeSnapshotTree()
+ tree = FakeSnapshotTree(tree=[root])
+ self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, backing,
+ 'snapshot').AndReturn(tree)
+ volops = volumeops.VMwareVolumeOps
+ m.StubOutWithMock(volops, '_get_snapshot_from_tree')
+ volops._get_snapshot_from_tree(name, root)
+
+ m.ReplayAll()
+ self._volumeops.get_snapshot(backing, name)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_snapshot_not_present(self):
+ """Test volumeops.delete_snapshot, when not present."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._volumeops, 'get_snapshot')
+ name = 'snapshot_name'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_snapshot(backing, name)
+
+ m.ReplayAll()
+ self._volumeops.delete_snapshot(backing, name)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_snapshot_when_present(self):
+ """Test volumeops.delete_snapshot, when it is present."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ m.StubOutWithMock(self._volumeops, 'get_snapshot')
+ name = 'snapshot_name'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ self._volumeops.get_snapshot(backing, name).AndReturn(snapshot)
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._session.vim,
+ 'RemoveSnapshot_Task', snapshot,
+ removeChildren=False).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task)
+
+ m.ReplayAll()
+ self._volumeops.delete_snapshot(backing, name)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_snapshot_without_backing(self):
+ """Test delete_snapshot without backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ self._volumeops.get_backing(snapshot['volume_name'])
+
+ m.ReplayAll()
+ self._driver.delete_snapshot(snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_snapshot_with_backing(self):
+ """Test delete_snapshot with backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ snapshot = FakeObject()
+ snapshot['name'] = 'snapshot_name'
+ snapshot['volume_name'] = 'volume_name'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'delete_snapshot')
+ self._volumeops.delete_snapshot(backing,
+ snapshot['name'])
+
+ m.ReplayAll()
+ self._driver.delete_snapshot(snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_cloned_volume_without_backing(self):
+ """Test create_cloned_volume without a backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ src_vref = FakeObject()
+ src_vref['name'] = 'src_volume_name'
+ self._volumeops.get_backing(src_vref['name'])
+
+ m.ReplayAll()
+ self._driver.create_cloned_volume(volume, src_vref)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_path_name(self):
+ """Test get_path_name."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ backing = FakeMor('VirtualMachine', 'my_back')
+
+ class FakePath(object):
+ def __init__(self, path=None):
+ self.vmPathName = path
+
+ path = FakePath()
+ self._session.invoke_api(vim_util, 'get_object_property', self._vim,
+ backing, 'config.files').AndReturn(path)
+
+ m.ReplayAll()
+ self._volumeops.get_path_name(backing)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_delete_file(self):
+ """Test _delete_file."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ src_path = 'src_path'
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'DeleteDatastoreFile_Task',
+ mox.IgnoreArg(), name=src_path,
+ datacenter=mox.IgnoreArg()).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task)
+
+ m.ReplayAll()
+ self._volumeops._delete_file(src_path)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_copy_backing(self):
+ """Test copy_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ src_path = 'src_path'
+ dest_path = 'dest_path'
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'CopyDatastoreFile_Task',
+ mox.IgnoreArg(), sourceName=src_path,
+ destinationName=dest_path).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task)
+
+ m.ReplayAll()
+ self._volumeops.copy_backing(src_path, dest_path)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_register_backing(self):
+ """Test register_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ path = 'src_path'
+ name = 'name'
+ folder = FakeMor('Folder', 'my_fol')
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'RegisterVM_Task', folder,
+ path=path, name=name, asTemplate=False,
+ pool=resource_pool).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ task_info = FakeTaskInfo('success', mox.IgnoreArg())
+ self._session.wait_for_task(task).AndReturn(task_info)
+
+ m.ReplayAll()
+ self._volumeops.register_backing(path, name, folder, resource_pool)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_clone_backing_by_copying(self):
+ """Test _clone_backing_by_copying."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['size'] = 1
+ m.StubOutWithMock(self._volumeops, 'get_path_name')
+ src_path = '/vmfs/volumes/datastore/vm/'
+ vmx_name = 'vm.vmx'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_path_name(backing).AndReturn(src_path + vmx_name)
+ m.StubOutWithMock(self._volumeops, 'get_host')
+ host = FakeMor('HostSystem', 'my_host')
+ self._volumeops.get_host(backing).AndReturn(host)
+ m.StubOutWithMock(self._volumeops, 'get_dss_rp')
+ datastores = [FakeMor('Datastore', 'my_ds')]
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool))
+ m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
+ folder = FakeMor('Folder', 'my_fol')
+ summary = FakeDatastoreSummary(1, 1, datastore=datastores[0],
+ name='datastore_name')
+ self._driver._get_folder_ds_summary(volume['size'], resource_pool,
+ datastores).AndReturn((folder,
+ summary))
+ m.StubOutWithMock(self._volumeops, 'copy_backing')
+ dest_path = '[%s] %s' % (summary.name, volume['name'])
+ self._volumeops.copy_backing(src_path, dest_path)
+ m.StubOutWithMock(self._volumeops, 'register_backing')
+ self._volumeops.register_backing(dest_path + '/' + vmx_name,
+ volume['name'], folder, resource_pool)
+
+ m.ReplayAll()
+ self._driver._clone_backing_by_copying(volume, backing)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_cloned_volume_with_backing(self):
+ """Test create_cloned_volume with a backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ src_vref = FakeObject()
+ src_vref['name'] = 'src_snapshot_name'
+ backing = FakeMor('VirtualMachine', 'my_vm')
+ self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
+ m.StubOutWithMock(self._driver, '_clone_backing_by_copying')
+ self._driver._clone_backing_by_copying(volume, backing)
+
+ m.ReplayAll()
+ self._driver.create_cloned_volume(volume, src_vref)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_volume_from_snapshot_without_backing(self):
+ """Test create_volume_from_snapshot without a backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ self._volumeops.get_backing(snapshot['volume_name'])
+
+ m.ReplayAll()
+ self._driver.create_volume_from_snapshot(volume, snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_volume_from_snap_without_backing_snap(self):
+ """Test create_volume_from_snapshot without a backing snapshot."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ backing = FakeMor('VirtualMachine', 'my_vm')
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'get_snapshot')
+ snapshot['name'] = 'snapshot_name'
+ self._volumeops.get_snapshot(backing, snapshot['name'])
+
+ m.ReplayAll()
+ self._driver.create_volume_from_snapshot(volume, snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_revert_to_snapshot(self):
+ """Test revert_to_snapshot."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ task = FakeMor('Task', 'my_task')
+ snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ self._session.invoke_api(self._vim, 'RevertToSnapshot_Task',
+ snapshot).AndReturn(task)
+
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task)
+
+ m.ReplayAll()
+ self._volumeops.revert_to_snapshot(snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_volume_from_snapshot(self):
+ """Test create_volume_from_snapshot."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ backing = FakeMor('VirtualMachine', 'my_vm')
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'get_snapshot')
+ snapshot['name'] = 'snapshot_name'
+ snapshot_mor = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ self._volumeops.get_snapshot(backing,
+ snapshot['name']).AndReturn(snapshot_mor)
+ m.StubOutWithMock(self._driver, '_clone_backing_by_copying')
+ clone = FakeMor('VirtualMachine', 'my_clone')
+ self._driver._clone_backing_by_copying(volume,
+ backing).AndReturn(clone)
+ clone_snap = FakeMor('VirtualMachineSnapshot', 'my_clone_snap')
+ self._volumeops.get_snapshot(clone,
+ snapshot['name']).AndReturn(clone_snap)
+ m.StubOutWithMock(self._volumeops, 'revert_to_snapshot')
+ self._volumeops.revert_to_snapshot(clone_snap)
+
+ m.ReplayAll()
+ self._driver.create_volume_from_snapshot(volume, snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+
+class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
+ """Test class for VMwareVcVmdkDriver."""
+
+ def setUp(self):
+ super(VMwareVcVmdkDriverTestCase, self).setUp()
+ self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config)
+
+ def test_create_folder_not_present(self):
+ """Test create_folder when not present."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ parent_folder = FakeMor('Folder', 'my_par_fol')
+ child_entities = FakeManagedObjectReference()
+ self._session.invoke_api(vim_util, 'get_object_property',
+ self._vim, parent_folder,
+ 'childEntity').AndReturn(child_entities)
+ self._session.invoke_api(self._vim, 'CreateFolder', parent_folder,
+ name='child_folder_name')
+
+ m.ReplayAll()
+        self._volumeops.create_folder(parent_folder, 'child_folder_name')
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_folder_already_present(self):
+ """Test create_folder when already present."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ parent_folder = FakeMor('Folder', 'my_par_fol')
+ child_folder = FakeMor('Folder', 'my_child_fol')
+ child_entities = FakeManagedObjectReference([child_folder])
+ self._session.invoke_api(vim_util, 'get_object_property',
+ self._vim, parent_folder,
+ 'childEntity').AndReturn(child_entities)
+ self._session.invoke_api(vim_util, 'get_object_property',
+ self._vim, child_folder,
+ 'name').AndReturn('child_folder_name')
+
+ m.ReplayAll()
+ fol = self._volumeops.create_folder(parent_folder, 'child_folder_name')
+ self.assertEquals(fol, child_folder)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_relocate_backing(self):
+ """Test relocate_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._volumeops, '_get_relocate_spec')
+ datastore = FakeMor('Datastore', 'my_ds')
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ host = FakeMor('HostSystem', 'my_host')
+ disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
+ self._volumeops._get_relocate_spec(datastore, resource_pool, host,
+ disk_move_type)
+ m.StubOutWithMock(self._session, 'invoke_api')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'RelocateVM_Task',
+ backing, spec=mox.IgnoreArg()).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task)
+
+ m.ReplayAll()
+ self._volumeops.relocate_backing(backing, datastore,
+ resource_pool, host)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_move_backing_to_folder(self):
+ """Test move_backing_to_folder."""
+ m = mox.Mox()
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ folder = FakeMor('Folder', 'my_fol')
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'MoveIntoFolder_Task',
+ folder, list=[backing]).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ self._session.wait_for_task(task)
+
+ m.ReplayAll()
+ self._volumeops.move_backing_to_folder(backing, folder)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_init_conn_with_instance_and_backing(self):
+ """Test initialize_connection with instance and backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['id'] = 'volume_id'
+ volume['size'] = 1
+ connector = {'instance': 'my_instance'}
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_backing(volume['name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'get_host')
+ host = FakeMor('HostSystem', 'my_host')
+ self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
+ datastore = FakeMor('Datastore', 'my_ds')
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ m.StubOutWithMock(self._volumeops, 'get_dss_rp')
+ self._volumeops.get_dss_rp(host).AndReturn(([datastore],
+ resource_pool))
+ m.StubOutWithMock(self._volumeops, 'get_datastore')
+ self._volumeops.get_datastore(backing).AndReturn(datastore)
+
+ m.ReplayAll()
+ conn_info = self._driver.initialize_connection(volume, connector)
+ self.assertEquals(conn_info['driver_volume_type'], 'vmdk')
+ self.assertEquals(conn_info['data']['volume'], 'my_back')
+ self.assertEquals(conn_info['data']['volume_id'], 'volume_id')
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_volume_group_folder(self):
+ """Test _get_volume_group_folder."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ datacenter = FakeMor('Datacenter', 'my_dc')
+ m.StubOutWithMock(self._volumeops, 'get_vmfolder')
+ self._volumeops.get_vmfolder(datacenter)
+ m.StubOutWithMock(self._volumeops, 'create_folder')
+ self._volumeops.create_folder(mox.IgnoreArg(),
+ self._config.vmware_volume_folder)
+
+ m.ReplayAll()
+ self._driver._get_volume_group_folder(datacenter)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_init_conn_with_instance_and_backing_and_relocation(self):
+ """Test initialize_connection with backing being relocated."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['id'] = 'volume_id'
+ volume['size'] = 1
+ connector = {'instance': 'my_instance'}
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_backing(volume['name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'get_host')
+ host = FakeMor('HostSystem', 'my_host')
+ self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
+ datastore1 = FakeMor('Datastore', 'my_ds_1')
+ datastore2 = FakeMor('Datastore', 'my_ds_2')
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ m.StubOutWithMock(self._volumeops, 'get_dss_rp')
+ self._volumeops.get_dss_rp(host).AndReturn(([datastore1],
+ resource_pool))
+ m.StubOutWithMock(self._volumeops, 'get_datastore')
+ self._volumeops.get_datastore(backing).AndReturn(datastore2)
+ m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
+ folder = FakeMor('Folder', 'my_fol')
+ summary = FakeDatastoreSummary(1, 1, datastore1)
+ size = 1
+ self._driver._get_folder_ds_summary(size, resource_pool,
+ [datastore1]).AndReturn((folder,
+ summary))
+ m.StubOutWithMock(self._volumeops, 'relocate_backing')
+ self._volumeops.relocate_backing(backing, datastore1,
+ resource_pool, host)
+ m.StubOutWithMock(self._volumeops, 'move_backing_to_folder')
+ self._volumeops.move_backing_to_folder(backing, folder)
+
+ m.ReplayAll()
+ conn_info = self._driver.initialize_connection(volume, connector)
+ self.assertEquals(conn_info['driver_volume_type'], 'vmdk')
+ self.assertEquals(conn_info['data']['volume'], 'my_back')
+ self.assertEquals(conn_info['data']['volume_id'], 'volume_id')
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_get_folder(self):
+ """Test _get_folder."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._volumeops, '_get_parent')
+ self._volumeops._get_parent(mox.IgnoreArg(), 'Folder')
+
+ m.ReplayAll()
+ self._volumeops._get_folder(mox.IgnoreArg())
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_volumeops_clone_backing(self):
+ """Test volumeops.clone_backing."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._volumeops, '_get_parent')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ folder = FakeMor('Folder', 'my_fol')
+ self._volumeops._get_folder(backing).AndReturn(folder)
+ m.StubOutWithMock(self._volumeops, '_get_clone_spec')
+ name = 'name'
+ snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ datastore = FakeMor('Datastore', 'my_ds')
+ self._volumeops._get_clone_spec(datastore, mox.IgnoreArg(), snapshot)
+ m.StubOutWithMock(api.VMwareAPISession, 'vim')
+ self._session.vim = self._vim
+ m.StubOutWithMock(self._session, 'invoke_api')
+ task = FakeMor('Task', 'my_task')
+ self._session.invoke_api(self._vim, 'CloneVM_Task', backing,
+ folder=folder, name=name,
+ spec=mox.IgnoreArg()).AndReturn(task)
+ m.StubOutWithMock(self._session, 'wait_for_task')
+ clone = FakeMor('VirtualMachine', 'my_clone')
+ task_info = FakeTaskInfo('success', clone)
+ self._session.wait_for_task(task).AndReturn(task_info)
+
+ m.ReplayAll()
+ ret = self._volumeops.clone_backing(name, backing, snapshot,
+ mox.IgnoreArg(), datastore)
+ self.assertEquals(ret, clone)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_clone_backing_linked(self):
+ """Test _clone_backing with clone type - linked."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'clone_backing')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ self._volumeops.clone_backing(volume['name'], mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ volumeops.LINKED_CLONE_TYPE,
+ mox.IgnoreArg())
+
+ m.ReplayAll()
+ self._driver._clone_backing(volume, mox.IgnoreArg(), mox.IgnoreArg(),
+ volumeops.LINKED_CLONE_TYPE)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_clone_backing_full(self):
+ """Test _clone_backing with clone type - full."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_host')
+ backing = FakeMor('VirtualMachine', 'my_vm')
+ host = FakeMor('HostSystem', 'my_host')
+ self._volumeops.get_host(backing).AndReturn(host)
+ m.StubOutWithMock(self._volumeops, 'get_dss_rp')
+ datastore = FakeMor('Datastore', 'my_ds')
+ datastores = [datastore]
+ resource_pool = FakeMor('ResourcePool', 'my_rp')
+ self._volumeops.get_dss_rp(host).AndReturn((datastores,
+ resource_pool))
+ m.StubOutWithMock(self._driver, '_select_datastore_summary')
+ volume = FakeObject()
+ volume['name'] = 'volume_name'
+ volume['size'] = 1
+ summary = FakeDatastoreSummary(1, 1, datastore=datastore)
+ self._driver._select_datastore_summary(volume['size'] * units.GiB,
+ datastores).AndReturn(summary)
+ m.StubOutWithMock(self._volumeops, 'clone_backing')
+ self._volumeops.clone_backing(volume['name'], backing,
+ mox.IgnoreArg(),
+ volumeops.FULL_CLONE_TYPE,
+ datastore)
+
+ m.ReplayAll()
+ self._driver._clone_backing(volume, backing, mox.IgnoreArg(),
+ volumeops.FULL_CLONE_TYPE)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_volume_from_snapshot(self):
+ """Test create_volume_from_snapshot."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ snapshot = FakeObject()
+ snapshot['volume_name'] = 'volume_name'
+ snapshot['name'] = 'snapshot_name'
+ backing = FakeMor('VirtualMachine', 'my_back')
+ self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+ m.StubOutWithMock(self._volumeops, 'get_snapshot')
+ snap_mor = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ self._volumeops.get_snapshot(backing,
+ snapshot['name']).AndReturn(snap_mor)
+ volume = FakeObject()
+ volume['volume_type_id'] = None
+ m.StubOutWithMock(self._driver, '_clone_backing')
+ self._driver._clone_backing(volume, backing, snap_mor, mox.IgnoreArg())
+
+ m.ReplayAll()
+ self._driver.create_volume_from_snapshot(volume, snapshot)
+ m.UnsetStubs()
+ m.VerifyAll()
+
+ def test_create_cloned_volume_with_backing(self):
+ """Test create_cloned_volume with clone type - full."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ src_vref = FakeObject()
+ src_vref['name'] = 'src_vol_name'
+ self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
+ volume = FakeObject()
+ volume['volume_type_id'] = None
+ m.StubOutWithMock(self._driver, '_clone_backing')
+ self._driver._clone_backing(volume, backing, mox.IgnoreArg(),
+ volumeops.FULL_CLONE_TYPE)
+
+ m.ReplayAll()
+ self._driver.create_cloned_volume(volume, src_vref)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_create_linked_cloned_volume_with_backing(self):
+ """Test create_cloned_volume with clone type - linked."""
+ m = mox.Mox()
+ m.StubOutWithMock(self._driver.__class__, 'volumeops')
+ self._driver.volumeops = self._volumeops
+ m.StubOutWithMock(self._volumeops, 'get_backing')
+ backing = FakeMor('VirtualMachine', 'my_back')
+ src_vref = FakeObject()
+ src_vref['name'] = 'src_vol_name'
+ self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
+ volume = FakeObject()
+ volume['id'] = 'volume_id'
+ m.StubOutWithMock(vmdk.VMwareVcVmdkDriver, '_get_clone_type')
+ moxed = vmdk.VMwareVcVmdkDriver._get_clone_type(volume)
+ moxed.AndReturn(volumeops.LINKED_CLONE_TYPE)
+ m.StubOutWithMock(self._volumeops, 'create_snapshot')
+ name = 'snapshot-%s' % volume['id']
+ snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+ self._volumeops.create_snapshot(backing, name,
+ None).AndReturn(snapshot)
+ m.StubOutWithMock(self._driver, '_clone_backing')
+ self._driver._clone_backing(volume, backing, snapshot,
+ volumeops.LINKED_CLONE_TYPE)
+
+ m.ReplayAll()
+ self._driver.create_cloned_volume(volume, src_vref)
+ m.UnsetStubs()
+ m.VerifyAll()
--- /dev/null
+++ b/cinder/volume/drivers/vmware/__init__.py
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`vmware` -- Volume support for VMware compatible datastores.
+"""
--- /dev/null
+++ b/cinder/volume/drivers/vmware/api.py
+# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Session and API call management for VMware ESX/VC server.
+Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls.
+"""
+
+from eventlet import event
+
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import vim
+from cinder.volume.drivers.vmware import vim_util
+
+LOG = logging.getLogger(__name__)
+
+
+class Retry(object):
+ """Decorator for retrying a function upon suggested exceptions.
+
+    The decorated function is retried the given number of times, and the
+    sleep time between retries is incremented until the max sleep time is
+    reached. If max retries is set to -1, the function is invoked
+    indefinitely until it returns without raising, or until the caught
+    exception is not in the list of suggested exceptions.
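+
+    A minimal usage sketch (the decorated function and the parameter
+    values here are illustrative, not taken from the driver):
+
+        @Retry(max_retry_count=3, inc_sleep_time=1, max_sleep_time=10,
+               exceptions=(error_util.VimException,))
+        def _fetch_properties():
+            # On error_util.VimException this is retried up to 3 times,
+            # sleeping 1s, 2s and 3s between attempts.
+            ...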
+ """
+
+ def __init__(self, max_retry_count=-1, inc_sleep_time=10,
+ max_sleep_time=60, exceptions=()):
+ """Initialize retry object based on input params.
+
+        :param max_retry_count: Max number of times a function must be
+                                retried when one of the input 'exceptions'
+                                is caught. The default -1 retries the
+                                function indefinitely until it succeeds or
+                                an unexpected error arises.
+        :param inc_sleep_time: Increment in seconds applied to the sleep
+                               time between retries
+        :param max_sleep_time: Max sleep time beyond which the sleep time
+                               is no longer incremented; max_sleep_time
+                               itself is used as the sleep time instead
+ :param exceptions: Suggested exceptions for which the function must be
+ retried
+ """
+ self._max_retry_count = max_retry_count
+ self._inc_sleep_time = inc_sleep_time
+ self._max_sleep_time = max_sleep_time
+ self._exceptions = exceptions
+ self._retry_count = 0
+ self._sleep_time = 0
+
+ def __call__(self, f):
+
+ def _func(done, *args, **kwargs):
+ try:
+ result = f(*args, **kwargs)
+ done.send(result)
+ except self._exceptions as excep:
+ LOG.exception(_("Failure while invoking function: "
+ "%(func)s. Error: %(excep)s.") %
+ {'func': f.__name__, 'excep': excep})
+ if (self._max_retry_count != -1 and
+ self._retry_count >= self._max_retry_count):
+ done.send_exception(excep)
+ else:
+ self._retry_count += 1
+ self._sleep_time += self._inc_sleep_time
+ return self._sleep_time
+ except Exception as excep:
+ done.send_exception(excep)
+ return 0
+
+ def func(*args, **kwargs):
+ done = event.Event()
+ loop = loopingcall.DynamicLoopingCall(_func, done, *args, **kwargs)
+ loop.start(periodic_interval_max=self._max_sleep_time)
+ result = done.wait()
+ loop.stop()
+ return result
+
+ return func
+
+
+class VMwareAPISession(object):
+ """Sets up a session with the server and handles all calls made to it."""
+
+    @Retry(exceptions=(Exception,))
+ def __init__(self, server_ip, server_username, server_password,
+ api_retry_count, task_poll_interval, scheme='https',
+ create_session=True, wsdl_loc=None):
+ """Constructs session object.
+
+ :param server_ip: IP address of ESX/VC server
+ :param server_username: Username of ESX/VC server admin user
+ :param server_password: Password for param server_username
+ :param api_retry_count: Number of times an API must be retried upon
+ session/connection related errors
+ :param task_poll_interval: Sleep time in seconds for polling an
+ on-going async task as part of the API call
+ :param scheme: http or https protocol
+ :param create_session: Boolean whether to set up connection at the
+ time of instance creation
+ :param wsdl_loc: WSDL file location for invoking SOAP calls on server
+ using suds
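+
+        A construction sketch (the values here are illustrative):
+
+            session = VMwareAPISession('10.0.0.1', 'admin', 'password',
+                                       api_retry_count=3,
+                                       task_poll_interval=5.0,
+                                       create_session=False)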
+ """
+ self._server_ip = server_ip
+ self._server_username = server_username
+ self._server_password = server_password
+ self._wsdl_loc = wsdl_loc
+ self._api_retry_count = api_retry_count
+ self._task_poll_interval = task_poll_interval
+ self._scheme = scheme
+ self._session_id = None
+ self._vim = None
+ if create_session:
+ self.create_session()
+
+ @property
+ def vim(self):
+ if not self._vim:
+ self._vim = vim.Vim(protocol=self._scheme, host=self._server_ip,
+ wsdl_loc=self._wsdl_loc)
+ return self._vim
+
+ def create_session(self):
+ """Establish session with the server."""
+ # Login and setup the session with the server for making
+ # API calls
+ session_manager = self.vim.service_content.sessionManager
+ session = self.vim.Login(session_manager,
+ userName=self._server_username,
+ password=self._server_password)
+        # Terminate the earlier session, if possible, to conserve
+        # sessions, since there is a limit on the number of sessions
+        # we can have.
+ if self._session_id:
+ try:
+ self.vim.TerminateSession(session_manager,
+ sessionId=[self._session_id])
+ except Exception as excep:
+ # This exception is something we can live with. It is
+ # just an extra caution on our side. The session may
+ # have been cleared. We could have made a call to
+ # SessionIsActive, but that is an overhead because we
+ # anyway would have to call TerminateSession.
+ LOG.exception(_("Error while terminating session: %s.") %
+ excep)
+ self._session_id = session.key
+ LOG.info(_("Successfully established connection to the server."))
+
+ def __del__(self):
+ """Logs-out the session."""
+ try:
+ self.vim.Logout(self.vim.service_content.sessionManager)
+ except Exception as excep:
+ LOG.exception(_("Error while logging out the user: %s.") %
+ excep)
+
+ def invoke_api(self, module, method, *args, **kwargs):
+ """Wrapper method for invoking APIs.
+
+ Here we retry the API calls for exceptions which may come because
+ of session overload.
+
+        Make sure that any Vim instance passed here is this session's
+        Vim (self.vim) instance, since the session is re-established
+        when it times out.
+
+ :param module: Module invoking the VI SDK calls
+ :param method: Method in the module that invokes the VI SDK call
+ :param args: Arguments to the method
+ :param kwargs: Keyword arguments to the method
+ :return: Response of the API call
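+
+        For example, the tests exercise this wrapper with calls such as:
+
+            session.invoke_api(vim_util, 'get_objects', session.vim,
+                               'VirtualMachine')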
+ """
+
+ @Retry(max_retry_count=self._api_retry_count,
+               exceptions=(error_util.VimException,))
+ def _invoke_api(module, method, *args, **kwargs):
+ last_fault_list = []
+ while True:
+ try:
+ api_method = getattr(module, method)
+ return api_method(*args, **kwargs)
+ except error_util.VimFaultException as excep:
+ if error_util.NOT_AUTHENTICATED not in excep.fault_list:
+ raise excep
+ # If it is a not-authenticated fault, we re-authenticate
+ # the user and retry the API invocation.
+
+                    # An idle session returns an empty RetrieveProperties
+                    # response, but so does a query with an empty answer
+                    # (e.g. no VMs on the host), so the two cases cannot be
+                    # distinguished directly. If the previous response was
+                    # also empty and we still get an empty response after
+                    # creating a new session, then the response is genuinely
+                    # empty.
+ if error_util.NOT_AUTHENTICATED in last_fault_list:
+ return []
+ last_fault_list = excep.fault_list
+ LOG.exception(_("Not authenticated error occurred. "
+ "Will create session and try "
+ "API call again: %s.") % excep)
+ self.create_session()
+
+ return _invoke_api(module, method, *args, **kwargs)
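+
+ # A minimal usage sketch (values illustrative, not part of the
+ # driver); the module/method pair mirrors how the volume ops code
+ # invokes the property-collector helpers through this session:
+ #
+ # session = VMwareAPISession('10.0.0.1', 'user', 'pwd',
+ # api_retry_count=10,
+ # task_poll_interval=5.0)
+ # vms = session.invoke_api(vim_util, 'get_objects',
+ # session.vim, 'VirtualMachine')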
+
+ def wait_for_task(self, task):
+ """Return a deferred that will give the result of the given task.
+
+ The task is polled until it completes. The method returns the task
+ information upon successful completion.
+
+ :param task: Managed object reference of the task
+ :return: Task info upon successful completion of the task
+ """
+ done = event.Event()
+ loop = loopingcall.FixedIntervalLoopingCall(self._poll_task,
+ task, done)
+ loop.start(self._task_poll_interval)
+ task_info = done.wait()
+ loop.stop()
+ return task_info
+
+ def _poll_task(self, task, done):
+ """Poll the given task.
+
+ If the task completes successfully then returns task info.
+ In case of error sends back appropriate error.
+
+ :param task: Managed object reference of the task
+ :param event: Event that captures task status
+ """
+ try:
+ task_info = self.invoke_api(vim_util, 'get_object_property',
+ self.vim, task, 'info')
+ if task_info.state in ['queued', 'running']:
+ # A task that has already completed on the server may
+ # not report progress.
+ if hasattr(task_info, 'progress'):
+ LOG.debug(_("Task: %(task)s progress: %(prog)s.") %
+ {'task': task, 'prog': task_info.progress})
+ return
+ elif task_info.state == 'success':
+ LOG.debug(_("Task %s status: success.") % task)
+ done.send(task_info)
+ else:
+ error_msg = str(task_info.error.localizedMessage)
+ LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
+ {'task': task, 'err': error_msg})
+ done.send_exception(error_util.VimFaultException([],
+ error_msg))
+ except Exception as excep:
+ LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
+ {'task': task, 'err': excep})
+ done.send_exception(excep)
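+
+ # A hedged usage sketch (names illustrative): callers typically start
+ # a server-side task via invoke_api and then block on it:
+ #
+ # task = session.invoke_api(session.vim, 'Destroy_Task', backing)
+ # task_info = session.wait_for_task(task)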
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception classes and SOAP response error checking module.
+"""
+
+from cinder import exception
+
+NOT_AUTHENTICATED = 'NotAuthenticated'
+
+
+class VimException(exception.CinderException):
+ """The VIM Exception class."""
+
+ def __init__(self, msg):
+ exception.CinderException.__init__(self, msg)
+
+
+class SessionOverLoadException(VimException):
+ """Session Overload Exception."""
+ pass
+
+
+class VimAttributeException(VimException):
+ """VI Attribute Error."""
+ pass
+
+
+class VimFaultException(exception.VolumeBackendAPIException):
+ """The VIM Fault exception class."""
+
+ def __init__(self, fault_list, msg):
+ exception.VolumeBackendAPIException.__init__(self, msg)
+ self.fault_list = fault_list
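+
+
+# Illustrative only: callers inspect fault_list to branch on specific SOAP
+# faults; the session layer, for instance, re-authenticates when it sees
+# NotAuthenticated:
+#
+# try:
+# ... # some VI SDK call
+# except VimFaultException as excep:
+# if NOT_AUTHENTICATED in excep.fault_list:
+# ... # re-create the session and retry the call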
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Classes for making VMware VI SOAP calls.
+"""
+
+import httplib
+
+import suds
+import suds.client
+import suds.plugin
+import suds.sudsobject
+
+from cinder.volume.drivers.vmware import error_util
+
+RESP_NOT_XML_ERROR = "Response is 'text/html', not 'text/xml'"
+CONN_ABORT_ERROR = 'Software caused connection abort'
+ADDRESS_IN_USE_ERROR = 'Address already in use'
+
+
+def get_moref(value, type):
+ """Get managed object reference.
+
+ :param value: value for the managed object
+ :param type: type of the managed object
+ :return: Managed object reference with the given value and type
+ """
+ moref = suds.sudsobject.Property(value)
+ moref._type = type
+ return moref
+
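+# For illustration (the moref value here is hypothetical), the VMDK driver
+# builds instance references this way in its initialize_connection:
+#
+# instance = get_moref('vm-1234', 'VirtualMachine')
+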
+
+class VIMMessagePlugin(suds.plugin.MessagePlugin):
+
+ def addAttributeForValue(self, node):
+ """Helper to handle AnyType.
+
+ suds does not handle AnyType properly. The VI SDK requires the type
+ attribute to be set when AnyType is used.
+
+ :param node: XML value node
+ """
+ if node.name == 'value':
+ node.set('xsi:type', 'xsd:string')
+
+ def marshalled(self, context):
+ """Marshal soap context.
+
+ Provides the plugin with the opportunity to prune empty
+ nodes and fixup nodes before sending it to the server.
+
+ :param context: SOAP context
+ """
+ # suds builds the entire request object based on the wsdl schema.
+ # VI SDK throws server errors if optional SOAP nodes are sent
+ # without values, e.g. <test/> as opposed to <test>test</test>
+ context.envelope.prune()
+ context.envelope.walk(self.addAttributeForValue)
+
+
+class Vim(object):
+ """The VIM Object."""
+
+ def __init__(self, protocol='https', host='localhost', wsdl_loc=None):
+ """Create communication interfaces for initiating SOAP transactions.
+
+ :param protocol: http or https
+ :param host: Server IPAddress[:port] or Hostname[:port]
+ """
+ self._protocol = protocol
+ self._host_name = host
+ if not wsdl_loc:
+ wsdl_loc = Vim._get_wsdl_loc(protocol, host)
+ soap_url = Vim._get_soap_url(protocol, host)
+ self._client = suds.client.Client(wsdl_loc, location=soap_url,
+ plugins=[VIMMessagePlugin()])
+ self._service_content = self.RetrieveServiceContent('ServiceInstance')
+
+ @staticmethod
+ def _get_wsdl_loc(protocol, host_name):
+ """Return default WSDL file location hosted at the server.
+
+ :param protocol: http or https
+ :param host_name: ESX/VC server host name
+ :return: Default WSDL file location hosted at the server
+ """
+ return '%s://%s/sdk/vimService.wsdl' % (protocol, host_name)
+
+ @staticmethod
+ def _get_soap_url(protocol, host_name):
+ """Return URL to SOAP services for ESX/VC server.
+
+ :param protocol: https or http
+ :param host_name: ESX/VC server host name
+ :return: URL to SOAP services for ESX/VC server
+ """
+ return '%s://%s/sdk' % (protocol, host_name)
+
+ @property
+ def service_content(self):
+ return self._service_content
+
+ @property
+ def client(self):
+ return self._client
+
+ def __getattr__(self, attr_name):
+ """Makes the API call and gets the result."""
+
+ def retrieve_properties_fault_checker(response):
+ """Checks the RetrieveProperties response for errors.
+
+ Certain faults are sent as part of the SOAP body as property of
+ missingSet. For example NotAuthenticated fault. The method raises
+ appropriate VimFaultException when an error is found.
+
+ :param response: Response from RetrieveProperties API call
+ """
+ fault_list = []
+ if not response:
+ # This is the case when the session has timed out. ESX SOAP
+ # server sends an empty RetrievePropertiesResponse. Normally
+ # missingSet in the returnval field has the specifics about
+ # the error, but that's not the case with a timed out idle
+ # session. It is as bad as a terminated session for we cannot
+ # use the session. So setting fault to NotAuthenticated fault.
+ fault_list = [error_util.NOT_AUTHENTICATED]
+ else:
+ for obj_cont in response:
+ if hasattr(obj_cont, 'missingSet'):
+ for missing_elem in obj_cont.missingSet:
+ fault_type = missing_elem.fault.fault.__class__
+ # Record the fault by its class name so that faults
+ # found in missingSet can be checked uniformly with
+ # faults parsed from SOAP fault responses
+ fault_list.append(fault_type.__name__)
+ if fault_list:
+ exc_msg_list = ', '.join(fault_list)
+ raise error_util.VimFaultException(fault_list,
+ _("Error(s): %s occurred "
+ "in the call to "
+ "RetrieveProperties.") %
+ exc_msg_list)
+
+ def vim_request_handler(managed_object, **kwargs):
+ """Handler for VI SDK calls.
+
+ Builds the SOAP message and parses the response for fault
+ checking and other errors.
+
+ :param managed_object: Managed object reference
+ :param kwargs: Keyword arguments of the call
+ :return: Response of the API call
+ """
+ try:
+ if isinstance(managed_object, str):
+ # For strings use string value for value and type
+ # of the managed object.
+ managed_object = get_moref(managed_object, managed_object)
+ request = getattr(self._client.service, attr_name)
+ response = request(managed_object, **kwargs)
+ if attr_name.lower() == 'retrieveproperties':
+ retrieve_properties_fault_checker(response)
+ return response
+
+ except error_util.VimFaultException:
+ raise
+
+ except suds.WebFault as excep:
+ doc = excep.document
+ detail = doc.childAtPath('/Envelope/Body/Fault/detail')
+ fault_list = []
+ for child in detail.getChildren():
+ fault_list.append(child.get('type'))
+ raise error_util.VimFaultException(fault_list, str(excep))
+
+ except AttributeError as excep:
+ raise error_util.VimAttributeException(_("No such SOAP method "
+ "%(attr)s. Detailed "
+ "error: %(excep)s.") %
+ {'attr': attr_name,
+ 'excep': excep})
+
+ except (httplib.CannotSendRequest,
+ httplib.ResponseNotReady,
+ httplib.CannotSendHeader) as excep:
+ raise error_util.SessionOverLoadException(_("httplib error in "
+ "%(attr)s: "
+ "%(excep)s.") %
+ {'attr': attr_name,
+ 'excep': excep})
+
+ except Exception as excep:
+ # Socket errors need special handling because they might
+ # be caused by server API call overload
+ if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
+ str(excep).find(CONN_ABORT_ERROR) != -1):
+ raise error_util.SessionOverLoadException(_("Socket error "
+ "in %(attr)s: "
+ "%(excep)s.") %
+ {'attr':
+ attr_name,
+ 'excep': excep})
+ # This type error needs special handling because it might
+ # be caused by server API call overload
+ elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
+ raise error_util.SessionOverLoadException(_("Type error "
+ "in %(attr)s: "
+ "%(excep)s.") %
+ {'attr':
+ attr_name,
+ 'excep': excep})
+ else:
+ raise error_util.VimException(_("Error in %(attr)s. "
+ "Detailed error: "
+ "%(excep)s.") %
+ {'attr': attr_name,
+ 'excep': excep})
+ return vim_request_handler
+
+ def __repr__(self):
+ return "VIM Object."
+
+ def __str__(self):
+ return "VIM Object."
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The VMware API utility module.
+"""
+
+
+def build_selection_spec(client_factory, name):
+ """Builds the selection spec.
+
+ :param client_factory: Factory to get API input specs
+ :param name: Name for the selection spec
+ :return: Selection spec
+ """
+ sel_spec = client_factory.create('ns0:SelectionSpec')
+ sel_spec.name = name
+ return sel_spec
+
+
+def build_traversal_spec(client_factory, name, type, path, skip,
+ select_set):
+ """Builds the traversal spec object.
+
+ :param client_factory: Factory to get API input specs
+ :param name: Name for the traversal spec
+ :param type: Type of the managed object reference
+ :param path: Property path of the managed object reference
+ :param skip: Whether or not to filter the object identified by param path
+ :param select_set: Set of selection specs specifying additional objects
+ to filter
+ :return: Traversal spec
+ """
+ traversal_spec = client_factory.create('ns0:TraversalSpec')
+ traversal_spec.name = name
+ traversal_spec.type = type
+ traversal_spec.path = path
+ traversal_spec.skip = skip
+ traversal_spec.selectSet = select_set
+ return traversal_spec
+
+
+def build_recursive_traversal_spec(client_factory):
+ """Builds Recursive Traversal Spec to traverse managed object hierarchy.
+
+ :param client_factory: Factory to get API input specs
+ :return: Recursive traversal spec
+ """
+ visit_folders_select_spec = build_selection_spec(client_factory,
+ 'visitFolders')
+ # Next hop from Datacenter
+ dc_to_hf = build_traversal_spec(client_factory, 'dc_to_hf', 'Datacenter',
+ 'hostFolder', False,
+ [visit_folders_select_spec])
+ dc_to_vmf = build_traversal_spec(client_factory, 'dc_to_vmf', 'Datacenter',
+ 'vmFolder', False,
+ [visit_folders_select_spec])
+
+ # Next hop from HostSystem
+ h_to_vm = build_traversal_spec(client_factory, 'h_to_vm', 'HostSystem',
+ 'vm', False,
+ [visit_folders_select_spec])
+
+ # Next hop from ComputeResource
+ cr_to_h = build_traversal_spec(client_factory, 'cr_to_h',
+ 'ComputeResource', 'host', False, [])
+ cr_to_ds = build_traversal_spec(client_factory, 'cr_to_ds',
+ 'ComputeResource', 'datastore', False, [])
+
+ rp_to_rp_select_spec = build_selection_spec(client_factory, 'rp_to_rp')
+ rp_to_vm_select_spec = build_selection_spec(client_factory, 'rp_to_vm')
+
+ cr_to_rp = build_traversal_spec(client_factory, 'cr_to_rp',
+ 'ComputeResource', 'resourcePool', False,
+ [rp_to_rp_select_spec,
+ rp_to_vm_select_spec])
+
+ # Next hop from ClusterComputeResource
+ ccr_to_h = build_traversal_spec(client_factory, 'ccr_to_h',
+ 'ClusterComputeResource', 'host',
+ False, [])
+ ccr_to_ds = build_traversal_spec(client_factory, 'ccr_to_ds',
+ 'ClusterComputeResource', 'datastore',
+ False, [])
+ ccr_to_rp = build_traversal_spec(client_factory, 'ccr_to_rp',
+ 'ClusterComputeResource', 'resourcePool',
+ False,
+ [rp_to_rp_select_spec,
+ rp_to_vm_select_spec])
+ # Next hop from ResourcePool
+ rp_to_rp = build_traversal_spec(client_factory, 'rp_to_rp', 'ResourcePool',
+ 'resourcePool', False,
+ [rp_to_rp_select_spec,
+ rp_to_vm_select_spec])
+ rp_to_vm = build_traversal_spec(client_factory, 'rp_to_vm', 'ResourcePool',
+ 'vm', False,
+ [rp_to_rp_select_spec,
+ rp_to_vm_select_spec])
+
+ # Build the top-level traversal spec that covers all the objects to
+ # be searched for, starting from the rootFolder
+ traversal_spec = build_traversal_spec(client_factory, 'visitFolders',
+ 'Folder', 'childEntity', False,
+ [visit_folders_select_spec,
+ h_to_vm, dc_to_hf, dc_to_vmf,
+ cr_to_ds, cr_to_h, cr_to_rp,
+ ccr_to_h, ccr_to_ds, ccr_to_rp,
+ rp_to_rp, rp_to_vm])
+ return traversal_spec
+
+
+def build_property_spec(client_factory, type='VirtualMachine',
+ properties_to_collect=None,
+ all_properties=False):
+ """Builds the Property Spec.
+
+ :param client_factory: Factory to get API input specs
+ :param type: Type of the managed object reference property
+ :param properties_to_collect: Properties of the managed object reference
+ to be collected while traversal filtering
+ :param all_properties: Whether all the properties of the managed
+ object reference need to be collected
+ :return: Property spec
+ """
+ if not properties_to_collect:
+ properties_to_collect = ['name']
+
+ property_spec = client_factory.create('ns0:PropertySpec')
+ property_spec.all = all_properties
+ property_spec.pathSet = properties_to_collect
+ property_spec.type = type
+ return property_spec
+
+
+def build_object_spec(client_factory, root_folder, traversal_specs):
+ """Builds the object Spec.
+
+ :param client_factory: Factory to get API input specs
+ :param root_folder: Root folder reference as the starting point for
+ traversal
+ :param traversal_specs: filter specs required for traversal
+ :return: Object spec
+ """
+ object_spec = client_factory.create('ns0:ObjectSpec')
+ object_spec.obj = root_folder
+ object_spec.skip = False
+ object_spec.selectSet = traversal_specs
+ return object_spec
+
+
+def build_property_filter_spec(client_factory, property_specs, object_specs):
+ """Builds the Property Filter Spec.
+
+ :param client_factory: Factory to get API input specs
+ :param property_specs: Property specs to be collected for filtered objects
+ :param object_specs: Object specs to identify objects to be filtered
+ :return: Property filter spec
+ """
+ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
+ property_filter_spec.propSet = property_specs
+ property_filter_spec.objectSet = object_specs
+ return property_filter_spec
+
+
+def get_objects(vim, type, props_to_collect=None, all_properties=False):
+ """Gets all managed object references of a specified type.
+
+ :param vim: Vim object
+ :param type: Type of the managed object reference
+ :param props_to_collect: Properties of the managed object reference
+ to be collected
+ :param all_properties: Whether all properties of the managed object
+ reference are to be collected
+ :return: All managed object references of a specified type
+ """
+ if not props_to_collect:
+ props_to_collect = ['name']
+
+ client_factory = vim.client.factory
+ recur_trav_spec = build_recursive_traversal_spec(client_factory)
+ object_spec = build_object_spec(client_factory,
+ vim.service_content.rootFolder,
+ [recur_trav_spec])
+ property_spec = build_property_spec(client_factory, type=type,
+ properties_to_collect=props_to_collect,
+ all_properties=all_properties)
+ property_filter_spec = build_property_filter_spec(client_factory,
+ [property_spec],
+ [object_spec])
+ return vim.RetrieveProperties(vim.service_content.propertyCollector,
+ specSet=[property_filter_spec])
+
+
+def get_object_properties(vim, mobj, properties):
+ """Gets properties of the managed object specified.
+
+ :param vim: Vim object
+ :param mobj: Reference to the managed object
+ :param properties: Properties of the managed object reference
+ to be retrieved
+ :return: Properties of the managed object specified
+ """
+ client_factory = vim.client.factory
+ if mobj is None:
+ return None
+ collector = vim.service_content.propertyCollector
+ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
+ property_spec = client_factory.create('ns0:PropertySpec')
+ property_spec.all = (properties is None or len(properties) == 0)
+ property_spec.pathSet = properties
+ property_spec.type = mobj._type
+ object_spec = client_factory.create('ns0:ObjectSpec')
+ object_spec.obj = mobj
+ object_spec.skip = False
+ property_filter_spec.propSet = [property_spec]
+ property_filter_spec.objectSet = [object_spec]
+ return vim.RetrieveProperties(collector, specSet=[property_filter_spec])
+
+
+def get_object_property(vim, mobj, property_name):
+ """Gets property of the managed object specified.
+
+ :param vim: Vim object
+ :param mobj: Reference to the managed object
+ :param property_name: Name of the property to be retrieved
+ :return: Property of the managed object specified
+ """
+ props = get_object_properties(vim, mobj, [property_name])
+ prop_val = None
+ if props:
+ prop = None
+ if hasattr(props[0], 'propSet'):
+ # propSet will be set only if the server provides a value
+ # for the field
+ prop = props[0].propSet
+ if prop:
+ prop_val = prop[0].val
+ return prop_val
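+
+
+# A minimal sketch of how these helpers compose (assuming an authenticated
+# Vim object); it mirrors the calls the volume ops code makes through
+# VMwareAPISession.invoke_api:
+#
+# datastores = get_objects(vim, 'Datastore')
+# for elem in datastores:
+# summary = get_object_property(vim, elem.obj, 'summary')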
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Driver for virtual machines running on VMware supported datastores.
+"""
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import units
+from cinder.volume import driver
+from cinder.volume.drivers.vmware import api
+from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import vim
+from cinder.volume.drivers.vmware import volumeops
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+THIN_VMDK_TYPE = 'thin'
+THICK_VMDK_TYPE = 'thick'
+EAGER_ZEROED_THICK_VMDK_TYPE = 'eagerZeroedThick'
+
+vmdk_opts = [
+ cfg.StrOpt('vmware_host_ip',
+ default=None,
+ help='IP address for connecting to VMware ESX/VC server.'),
+ cfg.StrOpt('vmware_host_username',
+ default=None,
+ help='Username for authenticating with VMware ESX/VC server.'),
+ cfg.StrOpt('vmware_host_password',
+ default=None,
+ help='Password for authenticating with VMware ESX/VC server.',
+ secret=True),
+ cfg.StrOpt('vmware_wsdl_location',
+ default=None,
+ help='Optional VIM service WSDL location, '
+ 'e.g. http://<server>/vimService.wsdl. Overrides the '
+ 'default location; useful for bug workarounds.'),
+ cfg.IntOpt('vmware_api_retry_count',
+ default=10,
+ help='Number of times VMware ESX/VC server API must be '
+ 'retried upon connection-related issues.'),
+ cfg.IntOpt('vmware_task_poll_interval',
+ default=5,
+ help='The interval used for polling remote tasks invoked on '
+ 'VMware ESX/VC server.'),
+ cfg.StrOpt('vmware_volume_folder',
+ default='cinder-volumes',
+ help='Name for the folder in the VC datacenter that will '
+ 'contain cinder volumes.')
+]
+
+
+def _get_volume_type_extra_spec(type_id, spec_key, possible_values,
+ default_value):
+ """Get extra spec value.
+
+ If the spec value is not present in the input possible_values, then
+ default_value will be returned.
+ If the type_id is None, then default_value is returned.
+
+ The caller must not consider scope; the implementation adds/removes
+ scope internally. The scope used here is 'vmware', e.g. for the key
+ 'vmware:vmdk_type' the caller must pass 'vmdk_type' as input.
+
+ :param type_id: Volume type ID
+ :param spec_key: Extra spec key
+ :param possible_values: Permitted values for the extra spec
+ :param default_value: Default value for the extra spec in case of an
+ invalid value or if the entry does not exist
+ :return: extra spec value
+ """
+ if type_id:
+ spec_key = 'vmware:%s' % spec_key
+ spec_value = volume_types.get_volume_type_extra_specs(type_id,
+ spec_key)
+ if spec_value in possible_values:
+ LOG.debug(_("Returning spec value %s.") % spec_value)
+ return spec_value
+
+ LOG.debug(_("Invalid spec value: %s specified.") % spec_value)
+
+ # Fall back to the default value
+ LOG.debug(_("Returning default spec value: %s.") % default_value)
+ return default_value
+
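+# For example, the ESX driver below resolves the disk type by passing the
+# unscoped key 'vmdk_type' (looked up internally as 'vmware:vmdk_type'):
+#
+# _get_volume_type_extra_spec(volume['volume_type_id'], 'vmdk_type',
+# (THIN_VMDK_TYPE, THICK_VMDK_TYPE,
+# EAGER_ZEROED_THICK_VMDK_TYPE),
+# THIN_VMDK_TYPE)
+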
+
+class VMwareEsxVmdkDriver(driver.VolumeDriver):
+ """Manage volumes on VMware ESX server."""
+
+ VERSION = '1.0'
+
+ def __init__(self, *args, **kwargs):
+ super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(vmdk_opts)
+ self._session = None
+ self._stats = None
+ self._volumeops = None
+
+ @property
+ def session(self):
+ if not self._session:
+ ip = self.configuration.vmware_host_ip
+ username = self.configuration.vmware_host_username
+ password = self.configuration.vmware_host_password
+ api_retry_count = self.configuration.vmware_api_retry_count
+ task_poll_interval = self.configuration.vmware_task_poll_interval
+ wsdl_loc = self.configuration.safe_get('vmware_wsdl_location')
+ self._session = api.VMwareAPISession(ip, username,
+ password, api_retry_count,
+ task_poll_interval,
+ wsdl_loc=wsdl_loc)
+ return self._session
+
+ @property
+ def volumeops(self):
+ if not self._volumeops:
+ self._volumeops = volumeops.VMwareVolumeOps(self.session)
+ return self._volumeops
+
+ def do_setup(self, context):
+ """Perform validations and establish connection to server.
+
+ :param context: Context information
+ """
+
+ # Raise an error if a required parameter is not set.
+ required_params = ['vmware_host_ip',
+ 'vmware_host_username',
+ 'vmware_host_password']
+ for param in required_params:
+ if not getattr(self.configuration, param, None):
+ raise exception.InvalidInput(_("%s not set.") % param)
+
+ # Create the session object for the first time
+ self._volumeops = volumeops.VMwareVolumeOps(self.session)
+ LOG.info(_("Successfully setup driver: %(driver)s for "
+ "server: %(ip)s.") %
+ {'driver': self.__class__.__name__,
+ 'ip': self.configuration.vmware_host_ip})
+
+ def check_for_setup_error(self):
+ pass
+
+ def get_volume_stats(self, refresh=False):
+ """Obtain status of the volume service.
+
+ :param refresh: Whether to get refreshed information
+ """
+
+ if not self._stats:
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ if not backend_name:
+ backend_name = self.__class__.__name__
+ data = {'volume_backend_name': backend_name,
+ 'vendor_name': 'VMware',
+ 'driver_version': self.VERSION,
+ 'storage_protocol': 'LSI Logic SCSI',
+ 'reserved_percentage': 0,
+ 'total_capacity_gb': 'unknown',
+ 'free_capacity_gb': 'unknown'}
+ self._stats = data
+ return self._stats
+
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ We do not create any backing here. The backing is created only
+ when the volume is first attached to a virtual machine.
+
+ :param volume: Volume object
+ """
+ pass
+
+ def _delete_volume(self, volume):
+ """Delete the volume backing if it is present.
+
+ :param volume: Volume object
+ """
+ backing = self.volumeops.get_backing(volume['name'])
+ if not backing:
+ LOG.info(_("Backing not available, no operation to be performed."))
+ return
+ self.volumeops.delete_backing(backing)
+
+ def delete_volume(self, volume):
+ """Deletes volume backing.
+
+ :param volume: Volume object
+ """
+ self._delete_volume(volume)
+
+ def _get_volume_group_folder(self, datacenter):
+ """Return vmFolder of datacenter as we cannot create folder in ESX.
+
+ :param datacenter: Reference to the datacenter
+ :return: vmFolder reference of the datacenter
+ """
+ return self.volumeops.get_vmfolder(datacenter)
+
+ def _select_datastore_summary(self, size_bytes, datastores):
+ """Get best summary from datastore list that can accomodate volume.
+
+ The implementation selects datastore based on maximum relative
+ free space, which is (free_space/total_space) and has free space to
+ store the volume backing.
+
+ :param size_bytes: Size in bytes of the volume
+ :param datastores: Datastores from which a choice is to be made
+ for the volume
+ :return: Best datastore summary to be picked for the volume
+ """
+ best_summary = None
+ best_ratio = 0
+ for datastore in datastores:
+ summary = self.volumeops.get_summary(datastore)
+ if summary.freeSpace > size_bytes:
+ ratio = float(summary.freeSpace) / summary.capacity
+ if ratio > best_ratio:
+ best_ratio = ratio
+ best_summary = summary
+
+ if not best_summary:
+ msg = _("Unable to pick datastore to accomodate %(size)s bytes "
+ "from the datastores: %(dss)s.")
+ LOG.error(msg % {'size': size_bytes, 'dss': datastores})
+ raise error_util.VimException(msg %
+ {'size': size_bytes,
+ 'dss': datastores})
+
+ LOG.debug(_("Selected datastore: %s for the volume.") % best_summary)
+ return best_summary
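+
+ # To illustrate the policy with made-up numbers: a datastore with
+ # 200GB free out of 1TB (ratio ~0.2) loses to one with 100GB free
+ # out of 200GB (ratio 0.5), provided both can hold the requested
+ # size.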
+
+ def _get_folder_ds_summary(self, size_gb, resource_pool, datastores):
+ """Get folder and best datastore summary where volume can be placed.
+
+ :param size_gb: Size of the volume in GB
+ :param resource_pool: Resource pool reference
+ :param datastores: Datastores from which a choice is to be made
+ for the volume
+ :return: Folder and best datastore summary where the volume can be
+ placed
+ """
+ datacenter = self.volumeops.get_dc(resource_pool)
+ folder = self._get_volume_group_folder(datacenter)
+ size_bytes = size_gb * units.GiB
+ datastore_summary = self._select_datastore_summary(size_bytes,
+ datastores)
+ return (folder, datastore_summary)
+
+ @staticmethod
+ def _get_disk_type(volume):
+ """Get disk type from volume type.
+
+ :param volume: Volume object
+ :return: Disk type
+ """
+ return _get_volume_type_extra_spec(volume['volume_type_id'],
+ 'vmdk_type',
+ (THIN_VMDK_TYPE, THICK_VMDK_TYPE,
+ EAGER_ZEROED_THICK_VMDK_TYPE),
+ THIN_VMDK_TYPE)
+
+ def _create_backing(self, volume, host):
+ """Create volume backing under the given host.
+
+ :param volume: Volume object
+ :param host: Reference of the host
+ :return: Reference to the created backing
+ """
+ # Get datastores and resource pool of the host
+ (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
+ # Pick a folder and datastore to create the volume backing on
+ (folder, summary) = self._get_folder_ds_summary(volume['size'],
+ resource_pool,
+ datastores)
+ disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
+ # Volume size is in GB; expressed in KB it is
+ # size * 1024 * 1024, i.e. size * units.MiB
+ size_kb = volume['size'] * units.MiB
+ return self.volumeops.create_backing(volume['name'],
+ size_kb,
+ disk_type, folder,
+ resource_pool,
+ host,
+ summary.name)
+
+ def _relocate_backing(self, size_gb, backing, host):
+ pass
+
+ def _create_backing_in_inventory(self, volume):
+ """Creates backing under any suitable host.
+
+ The method tries to pick datastore that can fit the volume under
+ any host in the inventory.
+
+ :param volume: Volume object
+ :return: Reference to the created backing
+ """
+ # Get all hosts
+ hosts = self.volumeops.get_hosts()
+ if not hosts:
+ msg = _("There are no hosts in the inventory.")
+ LOG.error(msg)
+ raise error_util.VimException(msg)
+
+ backing = None
+ for host in hosts:
+ try:
+ host = host.obj
+ backing = self._create_backing(volume, host)
+ break
+ except error_util.VimException as excep:
+ LOG.warn(_("Unable to find suitable datastore for "
+ "volume: %(vol)s under host: %(host)s. "
+ "More details: %(excep)s") %
+ {'vol': volume['name'], 'host': host, 'excep': excep})
+ if backing:
+ return backing
+ msg = _("Unable to create volume: %(vol)s on the hosts: %(hosts)s.")
+ LOG.error(msg % {'vol': volume['name'], 'hosts': hosts})
+ raise error_util.VimException(msg %
+ {'vol': volume['name'], 'hosts': hosts})
+
+ def _initialize_connection(self, volume, connector):
+ """Get information of volume's backing.
+
+ If the volume does not have a backing yet, it will be created.
+
+ :param volume: Volume object
+ :param connector: Connector information
+ :return: Return connection information
+ """
+ connection_info = {'driver_volume_type': 'vmdk'}
+
+ backing = self.volumeops.get_backing(volume['name'])
+ if 'instance' in connector:
+ # The instance exists
+ instance = vim.get_moref(connector['instance'], 'VirtualMachine')
+ LOG.debug(_("The instance: %s for which initialize connection "
+ "is called, exists.") % instance)
+ # Get host managing the instance
+ host = self.volumeops.get_host(instance)
+ if not backing:
+ # Create a backing in case it does not exist under the
+ # host managing the instance.
+ LOG.info(_("There is no backing for the volume: %s. "
+ "Need to create one.") % volume['name'])
+ backing = self._create_backing(volume, host)
+ else:
+ # Relocate volume if necessary
+ self._relocate_backing(volume['size'], backing, host)
+ else:
+ # The instance does not exist
+ LOG.debug(_("The instance for which initialize connection "
+ "is called, does not exist."))
+ if not backing:
+ # Create a backing in case it does not exist. It is a bad use
+ # case to boot from an empty volume.
+ LOG.warn(_("Trying to boot from an empty volume: %s.") %
+ volume['name'])
+ # Create backing
+ backing = self._create_backing_in_inventory(volume)
+
+ # Set volume's moref value and name
+ connection_info['data'] = {'volume': backing.value,
+ 'volume_id': volume['id']}
+
+ LOG.info(_("Returning connection_info: %(info)s for volume: "
+ "%(volume)s with connector: %(connector)s.") %
+ {'info': connection_info,
+ 'volume': volume['name'],
+ 'connector': connector})
+
+ return connection_info
+
+ def initialize_connection(self, volume, connector):
+ """Allow connection to connector and return connection info.
+
+ The implementation returns the following information:
+ {'driver_volume_type': 'vmdk'
+ 'data': {'volume': $VOLUME_MOREF_VALUE
+ 'volume_id': $VOLUME_ID
+ }
+ }
+
+ :param volume: Volume object
+ :param connector: Connector information
+ :return: Return connection information
+ """
+ return self._initialize_connection(volume, connector)
+
+ def terminate_connection(self, volume, connector, force=False, **kwargs):
+ pass
+
+ def create_export(self, context, volume):
+ pass
+
+ def ensure_export(self, context, volume):
+ pass
+
+ def remove_export(self, context, volume):
+ pass
+
+ def _create_snapshot(self, snapshot):
+ """Creates a snapshot.
+
+ If the volume does not have a backing then simply pass, else create
+ a snapshot.
+
+ :param snapshot: Snapshot object
+ """
+ backing = self.volumeops.get_backing(snapshot['volume_name'])
+ if not backing:
+ LOG.info(_("There is no backing, so will not create "
+ "snapshot: %s.") % snapshot['name'])
+ return
+ self.volumeops.create_snapshot(backing, snapshot['name'],
+ snapshot['display_description'])
+ LOG.info(_("Successfully created snapshot: %s.") % snapshot['name'])
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot.
+
+ :param snapshot: Snapshot object
+ """
+ self._create_snapshot(snapshot)
+
+ def _delete_snapshot(self, snapshot):
+ """Delete snapshot.
+
+ If the volume does not have a backing or the snapshot does not exist
+ then simply pass, else delete the snapshot.
+
+ :param snapshot: Snapshot object
+ """
+ backing = self.volumeops.get_backing(snapshot['volume_name'])
+ if not backing:
+ LOG.info(_("There is no backing, and so there is no "
+ "snapshot: %s.") % snapshot['name'])
+ else:
+ self.volumeops.delete_snapshot(backing, snapshot['name'])
+ LOG.info(_("Successfully deleted snapshot: %s.") %
+ snapshot['name'])
+
+ def delete_snapshot(self, snapshot):
+ """Delete snapshot.
+
+ :param snapshot: Snapshot object
+ """
+ self._delete_snapshot(snapshot)
+
+ def _clone_backing_by_copying(self, volume, backing):
+ """Creates volume clone.
+
+ Here we copy the backing on a datastore under the host and then
+ register the copied backing to the inventory.
+ It is assumed here that all the source backing files are in the
+ same folder on the datastore.
+
+ :param volume: New Volume object
+ :param backing: Reference to backing entity that must be cloned
+ :return: Reference to the cloned backing
+ """
+ src_path_name = self.volumeops.get_path_name(backing)
+ # If we have a path like /vmfs/volumes/datastore/vm/vm.vmx,
+ # we need to use /vmfs/volumes/datastore/vm/ as src_path
+ splits = src_path_name.split('/')
+ last_split = splits[-1]
+ src_path = src_path_name[:-len(last_split)]
+ # Pick a datastore under the same host on which to create the
+ # full clone
+ host = self.volumeops.get_host(backing)
+ (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
+ (folder, summary) = self._get_folder_ds_summary(volume['size'],
+ resource_pool,
+ datastores)
+ dest_path = '[%s] %s' % (summary.name, volume['name'])
+ # Copy source backing files to a destination location
+ self.volumeops.copy_backing(src_path, dest_path)
+ # Register the backing to the inventory
+ dest_path_name = '%s/%s' % (dest_path, last_split)
+ clone = self.volumeops.register_backing(dest_path_name,
+ volume['name'], folder,
+ resource_pool)
+ LOG.info(_("Successfully cloned new backing: %s.") % clone)
+ return clone
+
+ def _create_cloned_volume(self, volume, src_vref):
+ """Creates volume clone.
+
+ If source volume's backing does not exist, then pass.
+ Here we copy the backing on a datastore under the host and then
+ register the copied backing to the inventory.
+ It is assumed here that all the src_vref backing files are in the
+ same folder on the datastore.
+
+ :param volume: New Volume object
+ :param src_vref: Volume object that must be cloned
+ """
+ backing = self.volumeops.get_backing(src_vref['name'])
+ if not backing:
+ LOG.info(_("There is no backing for the source volume: "
+ "%(svol)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
+ {'svol': src_vref['name'],
+ 'vol': volume['name']})
+ return
+ self._clone_backing_by_copying(volume, backing)
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates volume clone.
+
+ :param volume: New Volume object
+ :param src_vref: Volume object that must be cloned
+ """
+ self._create_cloned_volume(volume, src_vref)
+
+ def _create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot.
+
+ If the snapshot does not exist or source volume's backing does not
+ exist, then pass.
+ Else we clone the backing by copying it and then revert the clone
+ to the appropriate snapshot point.
+
+ :param volume: Volume object
+ :param snapshot: Snapshot object
+ """
+ backing = self.volumeops.get_backing(snapshot['volume_name'])
+ if not backing:
+ LOG.info(_("There is no backing for the source snapshot: "
+ "%(snap)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
+ {'snap': snapshot['name'],
+ 'vol': volume['name']})
+ return
+ snapshot_moref = self.volumeops.get_snapshot(backing,
+ snapshot['name'])
+ if not snapshot_moref:
+ LOG.info(_("There is no snapshot point for the snapshoted volume: "
+ "%(snap)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
+ {'snap': snapshot['name'], 'vol': volume['name']})
+ return
+ clone = self._clone_backing_by_copying(volume, backing)
+ # Reverting the clone to the snapshot point.
+ snapshot_moref = self.volumeops.get_snapshot(clone, snapshot['name'])
+ self.volumeops.revert_to_snapshot(snapshot_moref)
+ LOG.info(_("Successfully reverted clone: %(clone)s to snapshot: "
+ "%(snapshot)s.") %
+ {'clone': clone, 'snapshot': snapshot_moref})
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot.
+
+ :param volume: Volume object
+ :param snapshot: Snapshot object
+ """
+ self._create_volume_from_snapshot(volume, snapshot)
+
+
+class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
+ """Manage volumes on VMware VC server."""
+
+ def _get_volume_group_folder(self, datacenter):
+ """Get volume group folder.
+
+ Creates a folder, named after the volume group, under the vmFolder
+ of the input datacenter if it does not already exist.
+
+ :param datacenter: Reference to the datacenter
+ :return: Reference to the volume folder
+ """
+ vm_folder = super(VMwareVcVmdkDriver,
+ self)._get_volume_group_folder(datacenter)
+ volume_folder = self.configuration.vmware_volume_folder
+ return self.volumeops.create_folder(vm_folder, volume_folder)
+
+ def _relocate_backing(self, size_gb, backing, host):
+ """Relocate volume backing under host and move to volume_group folder.
+
+ If the volume backing is already on a datastore that is visible to
+ the host, no operation needs to be performed.
+
+ :param size_gb: Size of the volume in GB
+ :param backing: Reference to the backing
+ :param host: Reference to the host
+ """
+ # Check if volume's datastore is visible to host managing
+ # the instance
+ (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
+ datastore = self.volumeops.get_datastore(backing)
+
+ visible_to_host = False
+ for _datastore in datastores:
+ if _datastore.value == datastore.value:
+ visible_to_host = True
+ break
+ if visible_to_host:
+ return
+
+ # The volume's backing is on a datastore that is not visible to the
+ # host managing the instance. We relocate the volume's backing.
+
+ # Pick a folder and datastore to relocate volume backing to
+ (folder, summary) = self._get_folder_ds_summary(size_gb, resource_pool,
+ datastores)
+ LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
+ {'backing': backing, 'ds': summary, 'rp': resource_pool})
+ # Relocate the backing to the datastore and folder
+ self.volumeops.relocate_backing(backing, summary.datastore,
+ resource_pool, host)
+ self.volumeops.move_backing_to_folder(backing, folder)
+
+ @staticmethod
+ def _get_clone_type(volume):
+ """Get clone type from volume type.
+
+ :param volume: Volume object
+ :return: Clone type from the extra spec if present, else return
+ default 'full' clone type
+ """
+ return _get_volume_type_extra_spec(volume['volume_type_id'],
+ 'clone_type',
+ (volumeops.FULL_CLONE_TYPE,
+ volumeops.LINKED_CLONE_TYPE),
+ volumeops.FULL_CLONE_TYPE)
+
+ def _clone_backing(self, volume, backing, snapshot, clone_type):
+ """Clone the backing.
+
+ :param volume: New Volume object
+ :param backing: Reference to the backing entity
+ :param snapshot: Reference to snapshot entity
+ :param clone_type: type of the clone
+ """
+ datastore = None
+ if clone_type != volumeops.LINKED_CLONE_TYPE:
+ # Pick a datastore under the same host on which to create the
+ # full clone
+ host = self.volumeops.get_host(backing)
+ (datastores, resource_pool) = self.volumeops.get_dss_rp(host)
+ size_bytes = volume['size'] * units.GiB
+ datastore = self._select_datastore_summary(size_bytes,
+ datastores).datastore
+ clone = self.volumeops.clone_backing(volume['name'], backing,
+ snapshot, clone_type, datastore)
+ LOG.info(_("Successfully created clone: %s.") % clone)
+
+ def _create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot.
+
+ If the snapshot does not exist or source volume's backing does not
+ exist, then pass.
+
+ :param volume: New Volume object
+ :param snapshot: Reference to snapshot entity
+ """
+ backing = self.volumeops.get_backing(snapshot['volume_name'])
+ if not backing:
+ LOG.info(_("There is no backing for the snapshoted volume: "
+ "%(snap)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
+ {'snap': snapshot['name'], 'vol': volume['name']})
+ return
+ snapshot_moref = self.volumeops.get_snapshot(backing,
+ snapshot['name'])
+ if not snapshot_moref:
+ LOG.info(_("There is no snapshot point for the snapshoted volume: "
+ "%(snap)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
+ {'snap': snapshot['name'], 'vol': volume['name']})
+ return
+ clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
+ self._clone_backing(volume, backing, snapshot_moref, clone_type)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot.
+
+ :param volume: New Volume object
+ :param snapshot: Reference to snapshot entity
+ """
+ self._create_volume_from_snapshot(volume, snapshot)
+
+ def _create_cloned_volume(self, volume, src_vref):
+ """Creates volume clone.
+
+ If source volume's backing does not exist, then pass.
+
+ :param volume: New Volume object
+ :param src_vref: Source Volume object
+ """
+ backing = self.volumeops.get_backing(src_vref['name'])
+ if not backing:
+ LOG.info(_("There is no backing for the source volume: %(src)s. "
+ "Not creating any backing for volume: %(vol)s.") %
+ {'src': src_vref['name'], 'vol': volume['name']})
+ return
+ clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
+ snapshot = None
+ if clone_type == volumeops.LINKED_CLONE_TYPE:
+ # For performing a linked clone, we snapshot the volume and
+ # then create the linked clone out of this snapshot point.
+ name = 'snapshot-%s' % volume['id']
+ snapshot = self.volumeops.create_snapshot(backing, name, None)
+ self._clone_backing(volume, backing, snapshot, clone_type)
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates volume clone.
+
+ :param volume: New Volume object
+ :param src_vref: Source Volume object
+ """
+ self._create_cloned_volume(volume, src_vref)
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Implements operations on volumes residing on VMware datastores.
+"""
+
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.vmware import error_util
+from cinder.volume.drivers.vmware import vim_util
+
+LOG = logging.getLogger(__name__)
+LINKED_CLONE_TYPE = 'linked'
+FULL_CLONE_TYPE = 'full'
+ALREADY_EXISTS = 'AlreadyExists'
+FILE_ALREADY_EXISTS = 'FileAlreadyExists'
+
+
+class VMwareVolumeOps(object):
+ """Manages volume operations."""
+
+ def __init__(self, session):
+ self._session = session
+
+ def get_backing(self, name):
+ """Get the backing based on name.
+
+ :param name: Name of the backing
+ :return: Managed object reference to the backing
+ """
+ vms = self._session.invoke_api(vim_util, 'get_objects',
+ self._session.vim, 'VirtualMachine')
+ for vm in vms:
+ if vm.propSet[0].val == name:
+ return vm.obj
+
+ LOG.debug(_("Did not find any backing with name: %s") % name)
+
+ def delete_backing(self, backing):
+ """Delete the backing.
+
+ :param backing: Managed object reference to the backing
+ """
+ LOG.debug(_("Deleting the VM backing: %s.") % backing)
+ task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
+ backing)
+ LOG.debug(_("Initiated deletion of VM backing: %s.") % backing)
+ self._session.wait_for_task(task)
+ LOG.info(_("Deleted the VM backing: %s.") % backing)
+
+ # TODO(kartikaditya) Keep the methods not specific to volume in
+ # a different file
+ def get_host(self, instance):
+ """Get host under which instance is present.
+
+ :param instance: Managed object reference of the instance VM
+ :return: Host managing the instance VM
+ """
+ return self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, instance,
+ 'runtime.host')
+
+ def get_hosts(self):
+ """Get all host from the inventory.
+
+ :return: All the hosts from the inventory
+ """
+ return self._session.invoke_api(vim_util, 'get_objects',
+ self._session.vim, 'HostSystem')
+
+ def get_dss_rp(self, host):
+ """Get datastores and resource pool of the host.
+
+ :param host: Managed object reference of the host
+ :return: Datastores mounted to the host and the resource pool to
+ which the host belongs
+ """
+ props = self._session.invoke_api(vim_util, 'get_object_properties',
+ self._session.vim, host,
+ ['datastore', 'parent'])
+ # Get datastores and compute resource or cluster compute resource
+ datastores = None
+ compute_resource = None
+ for elem in props:
+ for prop in elem.propSet:
+ if prop.name == 'datastore':
+ datastores = prop.val.ManagedObjectReference
+ elif prop.name == 'parent':
+ compute_resource = prop.val
+ # Get resource pool from compute resource or cluster compute resource
+ resource_pool = self._session.invoke_api(vim_util,
+ 'get_object_property',
+ self._session.vim,
+ compute_resource,
+ 'resourcePool')
+ if not datastores:
+ msg = _("There are no datastores present under %s.")
+ LOG.error(msg % host)
+ raise error_util.VimException(msg % host)
+ return (datastores, resource_pool)
+
+ def _get_parent(self, child, parent_type):
+ """Get immediate parent of given type via 'parent' property.
+
+ :param child: Child entity reference
+ :param parent_type: Entity type of the parent
+ :return: Immediate parent of specific type up the hierarchy via
+ 'parent' property
+ """
+ if not child:
+ return None
+ if child._type == parent_type:
+ return child
+ parent = self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, child, 'parent')
+ return self._get_parent(parent, parent_type)
+
+ def get_dc(self, child):
+ """Get parent datacenter up the hierarchy via 'parent' property.
+
+ :param child: Reference of the child entity
+ :return: Parent Datacenter of the param child entity
+ """
+ return self._get_parent(child, 'Datacenter')
+
+ def get_vmfolder(self, datacenter):
+ """Get the vmFolder.
+
+ :param datacenter: Reference to the datacenter entity
+ :return: vmFolder property of the datacenter
+ """
+ return self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, datacenter,
+ 'vmFolder')
+
+ def create_folder(self, parent_folder, child_folder_name):
+ """Creates child folder with given name under the given parent folder.
+
+ The method first checks if a child folder with the given name
+ already exists; if it does, it returns a moref for that folder,
+ else it creates the folder and returns its moref.
+
+ :param parent_folder: Reference to the folder entity
+ :param child_folder_name: Name of the child folder
+ :return: Reference to the child folder with the input name; the
+ folder is created first if it does not already exist
+ """
+ LOG.debug(_("Creating folder: %(child_folder_name)s under parent "
+ "folder: %(parent_folder)s.") %
+ {'child_folder_name': child_folder_name,
+ 'parent_folder': parent_folder})
+
+ # Get list of child entities for the parent folder
+ prop_val = self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, parent_folder,
+ 'childEntity')
+ child_entities = prop_val.ManagedObjectReference
+
+ # Return if the child folder with input name is already present
+ for child_entity in child_entities:
+ if child_entity._type != 'Folder':
+ continue
+ child_entity_name = self._session.invoke_api(vim_util,
+ 'get_object_property',
+ self._session.vim,
+ child_entity,
+ 'name')
+ if child_entity_name == child_folder_name:
+ LOG.debug(_("Child folder already present: %s.") %
+ child_entity)
+ return child_entity
+
+ # Need to create the child folder
+ child_folder = self._session.invoke_api(self._session.vim,
+ 'CreateFolder', parent_folder,
+ name=child_folder_name)
+ LOG.debug(_("Created child folder: %s.") % child_folder)
+ return child_folder
+
+ def _get_create_spec(self, name, size_kb, disk_type, ds_name):
+ """Return spec for creating volume backing.
+
+ :param name: Name of the backing
+ :param size_kb: Size in KB of the backing
+ :param disk_type: VMDK type for the disk
+ :param ds_name: Datastore name where the disk is to be provisioned
+ :return: Spec for creation
+ """
+ cf = self._session.vim.client.factory
+ controller_device = cf.create('ns0:VirtualLsiLogicController')
+ controller_device.key = -100
+ controller_device.busNumber = 0
+ controller_device.sharedBus = 'noSharing'
+ controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
+ controller_spec.operation = 'add'
+ controller_spec.device = controller_device
+
+ disk_device = cf.create('ns0:VirtualDisk')
+ disk_device.capacityInKB = size_kb
+ disk_device.key = -101
+ disk_device.unitNumber = 0
+ disk_device.controllerKey = -100
+ disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
+ if disk_type == 'eagerZeroedThick':
+ disk_device_bkng.eagerlyScrub = True
+ elif disk_type == 'thin':
+ disk_device_bkng.thinProvisioned = True
+ disk_device_bkng.fileName = '[%s]' % ds_name
+ disk_device_bkng.diskMode = 'persistent'
+ disk_device.backing = disk_device_bkng
+ disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
+ disk_spec.operation = 'add'
+ disk_spec.fileOperation = 'create'
+ disk_spec.device = disk_device
+
+ vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
+ vm_file_info.vmPathName = '[%s]' % ds_name
+
+ create_spec = cf.create('ns0:VirtualMachineConfigSpec')
+ create_spec.name = name
+ create_spec.guestId = 'otherGuest'
+ create_spec.numCPUs = 1
+ create_spec.memoryMB = 128
+ create_spec.deviceChange = [controller_spec, disk_spec]
+ create_spec.files = vm_file_info
+
+ LOG.debug(_("Spec for creating the backing: %s.") % create_spec)
+ return create_spec
+
+ def create_backing(self, name, size_kb, disk_type,
+ folder, resource_pool, host, ds_name):
+ """Create backing for the volume.
+
+ Creates a VM with one VMDK based on the given inputs.
+
+ :param name: Name of the backing
+ :param size_kb: Size in KB of the backing
+ :param disk_type: VMDK type for the disk
+ :param folder: Folder, where to create the backing under
+ :param resource_pool: Resource pool reference
+ :param host: Host reference
+ :param ds_name: Datastore name where the disk is to be provisioned
+ :return: Reference to the created backing entity
+ """
+ LOG.debug(_("Creating volume backing name: %(name)s "
+ "disk_type: %(disk_type)s size_kb: %(size_kb)s at "
+ "folder: %(folder)s resourse pool: %(resource_pool)s "
+ "datastore name: %(ds_name)s.") %
+ {'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
+ 'folder': folder, 'resource_pool': resource_pool,
+ 'ds_name': ds_name})
+
+ create_spec = self._get_create_spec(name, size_kb, disk_type, ds_name)
+ task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
+ folder, config=create_spec,
+ pool=resource_pool, host=host)
+ LOG.debug(_("Initiated creation of volume backing: %s.") % name)
+ task_info = self._session.wait_for_task(task)
+ backing = task_info.result
+ LOG.info(_("Successfully created volume backing: %s.") % backing)
+ return backing
+
+ def get_datastore(self, backing):
+ """Get datastore where the backing resides.
+
+ :param backing: Reference to the backing
+ :return: Datastore reference to which the backing belongs
+ """
+ return self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, backing,
+ 'datastore').ManagedObjectReference[0]
+
+ def get_summary(self, datastore):
+ """Get datastore summary.
+
+ :param datastore: Reference to the datastore
+ :return: 'summary' property of the datastore
+ """
+ return self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, datastore,
+ 'summary')
+
+ def _get_relocate_spec(self, datastore, resource_pool, host,
+ disk_move_type):
+ """Return spec for relocating volume backing.
+
+ :param datastore: Reference to the datastore
+ :param resource_pool: Reference to the resource pool
+ :param host: Reference to the host
+ :param disk_move_type: Disk move type option
+ :return: Spec for relocation
+ """
+ cf = self._session.vim.client.factory
+ relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
+ relocate_spec.datastore = datastore
+ relocate_spec.pool = resource_pool
+ relocate_spec.host = host
+ relocate_spec.diskMoveType = disk_move_type
+
+ LOG.debug(_("Spec for relocating the backing: %s.") % relocate_spec)
+ return relocate_spec
+
+ def relocate_backing(self, backing, datastore, resource_pool, host):
+ """Relocates backing to the input datastore and resource pool.
+
+ The implementation uses moveAllDiskBackingsAndAllowSharing disk move
+ type.
+
+ :param backing: Reference to the backing
+ :param datastore: Reference to the datastore
+ :param resource_pool: Reference to the resource pool
+ :param host: Reference to the host
+ """
+ LOG.debug(_("Relocating backing: %(backing)s to datastore: %(ds)s "
+ "and resource pool: %(rp)s.") %
+ {'backing': backing, 'ds': datastore, 'rp': resource_pool})
+
+ # Relocate the volume backing
+ disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
+ relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
+ disk_move_type)
+ task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
+ backing, spec=relocate_spec)
+ LOG.debug(_("Initiated relocation of volume backing: %s.") % backing)
+ self._session.wait_for_task(task)
+ LOG.info(_("Successfully relocated volume backing: %(backing)s "
+ "to datastore: %(ds)s and resource pool: %(rp)s.") %
+ {'backing': backing, 'ds': datastore, 'rp': resource_pool})
+
+ def move_backing_to_folder(self, backing, folder):
+ """Move the volume backing to the folder.
+
+ :param backing: Reference to the backing
+ :param folder: Reference to the folder
+ """
+ LOG.debug(_("Moving backing: %(backing)s to folder: %(fol)s.") %
+ {'backing': backing, 'fol': folder})
+ task = self._session.invoke_api(self._session.vim,
+ 'MoveIntoFolder_Task', folder,
+ list=[backing])
+ LOG.debug(_("Initiated move of volume backing: %(backing)s into the "
+ "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
+ self._session.wait_for_task(task)
+ LOG.info(_("Successfully moved volume backing: %(backing)s into the "
+ "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
+
+ def create_snapshot(self, backing, name, description):
+ """Create snapshot of the backing with given name and description.
+
+ :param backing: Reference to the backing entity
+ :param name: Snapshot name
+ :param description: Snapshot description
+ :return: Created snapshot entity reference
+ """
+ LOG.debug(_("Snapshoting backing: %(backing)s with name: %(name)s.") %
+ {'backing': backing, 'name': name})
+ task = self._session.invoke_api(self._session.vim,
+ 'CreateSnapshot_Task',
+ backing, name=name,
+ description=description,
+ memory=False, quiesce=False)
+ LOG.debug(_("Initiated snapshot of volume backing: %(backing)s "
+ "named: %(name)s.") % {'backing': backing, 'name': name})
+ task_info = self._session.wait_for_task(task)
+ snapshot = task_info.result
+ LOG.info(_("Successfully created snapshot: %(snap)s for volume "
+ "backing: %(backing)s.") %
+ {'snap': snapshot, 'backing': backing})
+ return snapshot
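+ # Illustrative usage (names and variables are hypothetical):
+ #     snap = volumeops.create_snapshot(backing, 'snapshot-0001',
+ #                                      'test snapshot')
+ #     volumeops.delete_snapshot(backing, 'snapshot-0001')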
+
+ @staticmethod
+ def _get_snapshot_from_tree(name, root):
+ """Get snapshot by name from the snapshot tree root.
+
+ :param name: Snapshot name
+ :param root: Current root node in the snapshot tree
+ :return: Snapshot in the snapshot tree with the given name, else None
+ """
+ if not root:
+ return None
+ if root.name == name:
+ return root.snapshot
+ if (not hasattr(root, 'childSnapshotList') or
+ not root.childSnapshotList):
+ # When the root has no children, the childSnapshotList attribute
+ # is sometimes missing, hence the explicit check above.
+ return None
+ for node in root.childSnapshotList:
+ snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
+ if snapshot:
+ return snapshot
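+ # The lookup is a depth-first search; e.g. in a tree
+ # 'base' -> 'snap-1' -> 'snap-2' (names are hypothetical), a lookup
+ # of 'snap-2' visits 'base' and 'snap-1' before matching.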
+
+ def get_snapshot(self, backing, name):
+ """Get snapshot of the backing with given name.
+
+ :param backing: Reference to the backing entity
+ :param name: Snapshot name
+ :return: Snapshot entity of the backing with given name
+ """
+ snapshot = self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, backing,
+ 'snapshot')
+ if not snapshot or not snapshot.rootSnapshotList:
+ return None
+ for root in snapshot.rootSnapshotList:
+     snap = VMwareVolumeOps._get_snapshot_from_tree(name, root)
+     if snap:
+         return snap
+ return None
+
+ def delete_snapshot(self, backing, name):
+ """Delete a given snapshot from volume backing.
+
+ :param backing: Reference to the backing entity
+ :param name: Snapshot name
+ """
+ LOG.debug(_("Deleting the snapshot: %(name)s from backing: "
+ "%(backing)s.") %
+ {'name': name, 'backing': backing})
+ snapshot = self.get_snapshot(backing, name)
+ if not snapshot:
+ LOG.info(_("Did not find the snapshot: %(name)s for backing: "
+ "%(backing)s. Need not delete anything.") %
+ {'name': name, 'backing': backing})
+ return
+ task = self._session.invoke_api(self._session.vim,
+ 'RemoveSnapshot_Task',
+ snapshot, removeChildren=False)
+ LOG.debug(_("Initiated snapshot: %(name)s deletion for backing: "
+ "%(backing)s.") %
+ {'name': name, 'backing': backing})
+ self._session.wait_for_task(task)
+ LOG.info(_("Successfully deleted snapshot: %(name)s of backing: "
+ "%(backing)s.") % {'backing': backing, 'name': name})
+
+ def _get_folder(self, backing):
+ """Get parent folder of the backing.
+
+ :param backing: Reference to the backing entity
+ :return: Reference to parent folder of the backing entity
+ """
+ return self._get_parent(backing, 'Folder')
+
+ def _get_clone_spec(self, datastore, disk_move_type, snapshot):
+ """Get the clone spec.
+
+ :param datastore: Reference to datastore
+ :param disk_move_type: Disk move type
+ :param snapshot: Reference to snapshot
+ :return: Clone spec
+ """
+ relocate_spec = self._get_relocate_spec(datastore, None, None,
+ disk_move_type)
+ cf = self._session.vim.client.factory
+ clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
+ clone_spec.location = relocate_spec
+ clone_spec.powerOn = False
+ clone_spec.template = False
+ clone_spec.snapshot = snapshot
+
+ LOG.debug(_("Spec for cloning the backing: %s.") % clone_spec)
+ return clone_spec
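+ # The clone starts powered off (powerOn=False) and as a regular VM,
+ # not a template (template=False); if snapshot is None, the vSphere
+ # API clones the current state of the backing.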
+
+ def clone_backing(self, name, backing, snapshot, clone_type, datastore):
+ """Clone backing.
+
+ If clone_type is 'full', a full clone of the source volume backing
+ will be created; if it is 'linked', a linked clone of the source
+ volume backing will be created.
+
+ :param name: Name for the clone
+ :param backing: Reference to the backing entity
+ :param snapshot: Snapshot point from which the clone should be done
+ :param clone_type: Whether a full clone or linked clone is to be made
+ :param datastore: Reference to the datastore entity
+ """
+ LOG.debug(_("Creating a clone of backing: %(back)s, named: %(name)s, "
+ "clone type: %(type)s from snapshot: %(snap)s on "
+ "datastore: %(ds)s") %
+ {'back': backing, 'name': name, 'type': clone_type,
+ 'snap': snapshot, 'ds': datastore})
+ folder = self._get_folder(backing)
+ if clone_type == LINKED_CLONE_TYPE:
+ disk_move_type = 'createNewChildDiskBacking'
+ else:
+ disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
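+ # A linked clone creates new child delta disks backed by the
+ # snapshot, so it is fast and space-efficient; a full clone copies
+ # all disk backings, producing a backing independent of the source.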
+ clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot)
+ task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
+ backing, folder=folder, name=name,
+ spec=clone_spec)
+ LOG.debug(_("Initiated clone of backing: %s.") % name)
+ task_info = self._session.wait_for_task(task)
+ new_backing = task_info.result
+ LOG.info(_("Successfully created clone: %s.") % new_backing)
+ return new_backing
+
+ def _delete_file(self, file_path, datacenter=None):
+ """Delete file or folder on the datastore.
+
+ :param file_path: Datastore path of the file or folder
+ :param datacenter: Optional reference to the datacenter
+ """
+ LOG.debug(_("Deleting file: %(file)s under datacenter: %(dc)s.") %
+ {'file': file_path, 'dc': datacenter})
+ fileManager = self._session.vim.service_content.fileManager
+ task = self._session.invoke_api(self._session.vim,
+ 'DeleteDatastoreFile_Task',
+ fileManager,
+ name=file_path,
+ datacenter=datacenter)
+ LOG.debug(_("Initiated deletion via task: %s.") % task)
+ self._session.wait_for_task(task)
+ LOG.info(_("Successfully deleted file: %s.") % file_path)
+
+ def copy_backing(self, src_folder_path, dest_folder_path):
+ """Copy the backing folder recursively onto the destination folder.
+
+ Any files already present at the destination are overwritten: on a
+ FileAlreadyExists fault the destination folder is deleted and the
+ copy is retried.
+
+ :param src_folder_path: Datastore path of the source folder
+ :param dest_folder_path: Datastore path of the destination
+ """
+ LOG.debug(_("Copying backing files from %(src)s to %(dest)s.") %
+ {'src': src_folder_path, 'dest': dest_folder_path})
+ fileManager = self._session.vim.service_content.fileManager
+ try:
+ task = self._session.invoke_api(self._session.vim,
+ 'CopyDatastoreFile_Task',
+ fileManager,
+ sourceName=src_folder_path,
+ destinationName=dest_folder_path)
+ LOG.debug(_("Initiated copying of backing via task: %s.") % task)
+ self._session.wait_for_task(task)
+ LOG.info(_("Successfully copied backing to %s.") %
+ dest_folder_path)
+ except error_util.VimFaultException as excep:
+ if FILE_ALREADY_EXISTS not in excep.fault_list:
+ raise excep
+ # Files may be left on the datastore from a previous failed
+ # attempt; clean up the destination folder and retry the copy.
+ self._delete_file(dest_folder_path)
+ self.copy_backing(src_folder_path, dest_folder_path)
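+ # The retry normally recurses only once: with the destination
+ # folder deleted, the same FileAlreadyExists fault should not recur.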
+
+ def get_path_name(self, backing):
+ """Get path name of the backing.
+
+ :param backing: Reference to the backing entity
+ :return: Path name of the backing
+ """
+ return self._session.invoke_api(vim_util, 'get_object_property',
+ self._session.vim, backing,
+ 'config.files').vmPathName
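+ # vmPathName is the datastore path of the backing's vmx file, of the
+ # form '[datastore1] volume-0001/volume-0001.vmx' (example values).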
+
+ def register_backing(self, path, name, folder, resource_pool):
+ """Register backing to the inventory.
+
+ :param path: Datastore path to the backing
+ :param name: Name with which we register the backing
+ :param folder: Reference to the folder entity
+ :param resource_pool: Reference to the resource pool entity
+ :return: Reference to the backing that is registered
+ """
+ try:
+ LOG.debug(_("Registering backing at path: %s to inventory.") %
+ path)
+ task = self._session.invoke_api(self._session.vim,
+ 'RegisterVM_Task', folder,
+ path=path, name=name,
+ asTemplate=False,
+ pool=resource_pool)
+ LOG.debug(_("Initiated registring backing, task: %s.") % task)
+ task_info = self._session.wait_for_task(task)
+ backing = task_info.result
+ LOG.info(_("Successfully registered backing: %s.") % backing)
+ return backing
+ except error_util.VimFaultException as excep:
+ if ALREADY_EXISTS not in excep.fault_list:
+ raise excep
+ # The vmx file may already be registered in the inventory due to
+ # a previous failed attempt; in that case simply retrieve the
+ # backing moref by name and return it.
+ return self.get_backing(name)
+
+ def revert_to_snapshot(self, snapshot):
+ """Revert backing to a snapshot point.
+
+ :param snapshot: Reference to the snapshot entity
+ """
+ LOG.debug(_("Reverting backing to snapshot: %s.") % snapshot)
+ task = self._session.invoke_api(self._session.vim,
+ 'RevertToSnapshot_Task',
+ snapshot)
+ LOG.debug(_("Initiated reverting snapshot via task: %s.") % task)
+ self._session.wait_for_task(task)
+ LOG.info(_("Successfully reverted to snapshot: %s.") % snapshot)
#storwize_svc_multihostmap_enabled=true
+#
+# Options defined in cinder.volume.drivers.vmware.vmdk
+#
+
+# IP address for connecting to VMware ESX/VC server. (string
+# value)
+#vmware_host_ip=<None>
+
+# Username for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_username=<None>
+
+# Password for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_password=<None>
+
+# Optional VIM service WSDL location, e.g.
+# http://<server>/vimService.wsdl. Overrides the default
+# location; useful as a workaround for WSDL bugs. (string value)
+#vmware_wsdl_location=<None>
+
+# Number of times the VMware ESX/VC server API is retried on
+# connection-related failures. (integer value)
+#vmware_api_retry_count=10
+
+# The interval (in seconds) for polling remote tasks invoked on
+# VMware ESX/VC server. (integer value)
+#vmware_task_poll_interval=5
+
+# Name for the folder in the VC datacenter that will contain
+# cinder volumes. (string value)
+#vmware_volume_folder=cinder-volumes
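+
+# Example settings (illustrative values only, not defaults):
+#   vmware_host_ip=192.168.0.10
+#   vmware_host_username=administrator
+#   vmware_host_password=secret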
+
+
#
# Options defined in cinder.volume.drivers.windows
#
#volume_dd_blocksize=1M
-# Total option count: 346
+# Total option count: 353
kombu>=2.4.8
lockfile>=0.8
lxml>=2.3
+netaddr
oslo.config>=1.1.0
paramiko>=1.8.0
Paste
SQLAlchemy>=0.7.8,<=0.7.99
sqlalchemy-migrate>=0.7.2
stevedore>=0.10
+suds>=0.4
WebOb>=1.2.3,<1.3
wsgiref>=0.1.2