fake_vm_create_spec = mock.sentinel.spec
fake_disk_type = 'thin'
vol_name = 'fake_volume name'
- vol_id = '12345'
+ vol_id = 'd11a82de-ddaa-448d-b50a-a255a7e61a1e'
fake_volume = {'name': vol_name,
'id': vol_id,
'size': fake_volume_size,
image_service.show.assert_called_with(fake_context, fake_image_id)
_select_ds_for_volume.assert_called_with(fake_volume)
get_profile_id.assert_called_once_with(fake_volume)
+ extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: vol_id}
volumeops.get_create_spec.assert_called_with(fake_volume['name'],
0,
fake_disk_type,
fake_summary.name,
- profile_id,
- adapter_type)
+ profileId=profile_id,
+ adapter_type=adapter_type,
+ extra_config=extra_config)
self.assertTrue(download_image.called)
download_image.assert_called_with(fake_context, timeout,
image_service,
context = mock.sentinel.context
name = 'vm-1'
- volume = {'name': 'vol-1', 'id': 1, 'size': 1}
+ volume = {'name': 'vol-1',
+ 'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e',
+ 'size': 1}
tmp_file_path = mock.sentinel.tmp_file_path
file_size_bytes = units.Gi
ret = self._driver._create_backing_from_stream_optimized_file(
context, name, volume, tmp_file_path, file_size_bytes)
self.assertEqual(vm_ref, ret)
+ extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']}
vops.get_create_spec.assert_called_once_with(
- name, 0, disk_type, summary.name, profile_id)
+ name, 0, disk_type, summary.name, profileId=profile_id,
+ extra_config=extra_config)
file_open.assert_called_once_with(tmp_file_path, "rb")
download_data.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
"""Test _clone_backing with clone type - linked."""
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
+ 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name',
'name': 'snapshot_name',
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
+ extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']}
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
fake_type,
None,
host=None,
- resource_pool=None)
+ resource_pool=None,
+ extra_config=extra_config)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_summary.datastore = fake_datastore
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
+ 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name',
'volume_size': 2}
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
_select_ds_for_volume.assert_called_with(fake_volume)
+ extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']}
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
fake_datastore,
host=fake_host,
resource_pool=
- fake_resource_pool)
+ fake_resource_pool,
+ extra_config=extra_config)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
select_ds_for_volume.return_value = (host, resource_pool, folder,
summary)
- volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1}
+ volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1,
+ 'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e'}
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
self._driver._create_backing(volume, host, create_params)
- vops.create_backing_disk_less.assert_called_once_with('vol-1',
- folder,
- resource_pool,
- host,
- summary.name,
- None)
+ extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']}
+ vops.create_backing_disk_less.assert_called_once_with(
+ 'vol-1',
+ folder,
+ resource_pool,
+ host,
+ summary.name,
+ profileId=None,
+ extra_config=extra_config)
create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
self._driver._create_backing(volume, host, create_params)
resource_pool,
host,
summary.name,
- None,
- 'ide')
+ profileId=None,
+ adapter_type='ide',
+ extra_config=extra_config)
vops.create_backing.reset_mock()
backing_name = "temp-vol"
resource_pool,
host,
summary.name,
- None,
- 'lsiLogic')
+ profileId=None,
+ adapter_type='lsiLogic',
+ extra_config=extra_config)
@mock.patch('cinder.openstack.common.fileutils.ensure_tree')
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
name = mock.sentinel.name
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
- ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id)
+ option_key = mock.sentinel.key
+ option_value = mock.sentinel.value
+ extra_config = {option_key: option_value}
+ ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id,
+ extra_config)
factory.create.side_effect = None
self.assertEqual(name, ret.name)
self.assertEqual('[%s]' % ds_name, ret.files.vmPathName)
self.assertEqual("vmx-08", ret.version)
self.assertEqual(profile_id, ret.vmProfile[0].profileId)
+ self.assertEqual(1, len(ret.extraConfig))
+ self.assertEqual(option_key, ret.extraConfig[0].key)
+ self.assertEqual(option_value, ret.extraConfig[0].value)
expected = [mock.call.create('ns0:VirtualMachineFileInfo'),
mock.call.create('ns0:VirtualMachineConfigSpec'),
- mock.call.create('ns0:VirtualMachineDefinedProfileSpec')]
+ mock.call.create('ns0:VirtualMachineDefinedProfileSpec'),
+ mock.call.create('ns0:OptionValue')]
factory.create.assert_has_calls(expected, any_order=True)
+ @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+ '_get_create_spec_disk_less')
+ @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
+ '_create_specs_for_disk_add')
+ def test_get_create_spec(self, create_specs_for_disk_add,
+ get_create_spec_disk_less):
+ name = 'vol-1'
+ size_kb = 1024
+ disk_type = 'thin'
+ ds_name = 'nfs-1'
+ profileId = mock.sentinel.profile_id
+ adapter_type = 'busLogic'
+ extra_config = mock.sentinel.extra_config
+
+ self.vops.get_create_spec(name, size_kb, disk_type, ds_name,
+ profileId, adapter_type, extra_config)
+
+ get_create_spec_disk_less.assert_called_once_with(
+ name, ds_name, profileId=profileId, extra_config=extra_config)
+ create_specs_for_disk_add.assert_called_once_with(
+ size_kb, disk_type, adapter_type)
+
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_create_spec')
def test_create_backing(self, get_create_spec):
host = mock.sentinel.host
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
+ extra_config = mock.sentinel.extra_config
ret = self.vops.create_backing(name, size_kb, disk_type, folder,
resource_pool, host, ds_name,
- profile_id, adapter_type)
+ profile_id, adapter_type, extra_config)
self.assertEqual(mock.sentinel.result, ret)
- get_create_spec.assert_called_once_with(name, size_kb, disk_type,
- ds_name, profile_id,
- adapter_type)
+ get_create_spec.assert_called_once_with(
+ name, size_kb, disk_type, ds_name, profileId=profile_id,
+ adapter_type=adapter_type, extra_config=extra_config)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
host = mock.sentinel.host
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
+ extra_config = mock.sentinel.extra_config
ret = self.vops.create_backing_disk_less(name, folder, resource_pool,
- host, ds_name, profile_id)
+ host, ds_name, profile_id,
+ extra_config)
self.assertEqual(mock.sentinel.result, ret)
- get_create_spec_disk_less.assert_called_once_with(name, ds_name,
- profile_id)
+ get_create_spec_disk_less.assert_called_once_with(
+ name, ds_name, profileId=profile_id, extra_config=extra_config)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
self.assertEqual(folder, ret)
get_parent.assert_called_once_with(backing, 'Folder')
+ def _verify_extra_config(self, option_values, key, value):
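+ """Check that option_values contains exactly the given key-value pair."""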
+ self.assertEqual(1, len(option_values))
+ self.assertEqual(key, option_values[0].key)
+ self.assertEqual(value, option_values[0].value)
+
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_relocate_spec')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
relocate_spec = mock.sentinel.relocate_spec
get_relocate_spec.return_value = relocate_spec
+ # Test with empty disk type.
datastore = mock.sentinel.datastore
disk_move_type = mock.sentinel.disk_move_type
snapshot = mock.sentinel.snapshot
disk_type = None
backing = mock.sentinel.backing
+ host = mock.sentinel.host
+ rp = mock.sentinel.rp
+ key = mock.sentinel.key
+ value = mock.sentinel.value
+ extra_config = {key: value}
ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
- backing, disk_type)
+ backing, disk_type, host, rp,
+ extra_config)
self.assertEqual(relocate_spec, ret.location)
self.assertFalse(ret.powerOn)
self.assertFalse(ret.template)
self.assertEqual(snapshot, ret.snapshot)
- get_relocate_spec.assert_called_once_with(datastore, None, None,
+ get_relocate_spec.assert_called_once_with(datastore, rp, host,
disk_move_type, disk_type,
None)
+ self._verify_extra_config(ret.config.extraConfig, key, value)
+ # Test with non-empty disk type.
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = disk_device
disk_type = 'thin'
ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
- backing, disk_type)
+ backing, disk_type, host, rp,
+ extra_config)
factory.create.side_effect = None
self.assertEqual(relocate_spec, ret.location)
self.assertFalse(ret.template)
self.assertEqual(snapshot, ret.snapshot)
get_disk_device.assert_called_once_with(backing)
- get_relocate_spec.assert_called_with(datastore, None, None,
+ get_relocate_spec.assert_called_with(datastore, rp, host,
disk_move_type, disk_type,
disk_device)
+ self._verify_extra_config(ret.config.extraConfig, key, value)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_clone_spec')
# verify calls
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
- get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
- backing, None, None, None)
+ get_clone_spec.assert_called_with(
+ datastore, disk_move_type, snapshot, backing, None, host=None,
+ resource_pool=None, extra_config=None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
# verify calls
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'createNewChildDiskBacking'
- get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
- backing, None, None, None)
+ get_clone_spec.assert_called_with(
+ datastore, disk_move_type, snapshot, backing, None, host=None,
+ resource_pool=None, extra_config=None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
- # Test disk type conversion and target host.
+ # Test with optional params (disk_type, host, resource_pool and
+ # extra_config).
clone_type = None
disk_type = 'thin'
host = mock.sentinel.host
rp = mock.sentinel.rp
+ extra_config = mock.sentinel.extra_config
self.session.invoke_api.reset_mock()
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
- datastore, disk_type, host, rp)
+ datastore, disk_type, host, rp,
+ extra_config)
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
- get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
- backing, disk_type, host, rp)
+ get_clone_spec.assert_called_with(
+ datastore, disk_move_type, snapshot, backing, disk_type, host=host,
+ resource_pool=rp, extra_config=extra_config)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
TMP_IMAGES_DATASTORE_FOLDER_PATH = "cinder_temp/"
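+# Extra config key under which the backing VM stores the ID of the Cinder
+# volume it belongs to.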
+EXTRA_CONFIG_VOLUME_ID_KEY = "cinder.volume.id"
+
vmdk_opts = [
cfg.StrOpt('vmware_host_ip',
default=None,
profile_id = profile.uniqueId
return profile_id
+ def _get_extra_config(self, volume):
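+ """Return extra config key-value pairs to set on the volume's backing VM."""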
+ return {EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']}
+
def _create_backing(self, volume, host=None, create_params=None):
"""Create volume backing under the given host.
backing_name = create_params.get(CREATE_PARAM_BACKING_NAME,
volume['name'])
+ extra_config = self._get_extra_config(volume)
+
# The default is a backing with a single disk.
disk_less = create_params.get(CREATE_PARAM_DISK_LESS, False)
if disk_less:
# Create a disk-less backing; the disk can be added later, e.g.,
# by copying an image.
- return self.volumeops.create_backing_disk_less(backing_name,
- folder,
- resource_pool,
- host_ref,
- summary.name,
- profile_id)
+ return self.volumeops.create_backing_disk_less(
+ backing_name,
+ folder,
+ resource_pool,
+ host_ref,
+ summary.name,
+ profileId=profile_id,
+ extra_config=extra_config)
# Create a backing with a single disk.
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
resource_pool,
host_ref,
summary.name,
- profile_id,
- adapter_type)
+ profileId=profile_id,
+ adapter_type=adapter_type,
+ extra_config=extra_config)
def _relocate_backing(self, volume, backing, host):
pass
# The size of a stream-optimized glance image is often suspect, so it is
# better to let vCenter figure out the disk capacity during import.
dummy_disk_size = 0
- vm_create_spec = self.volumeops.get_create_spec(volume['name'],
- dummy_disk_size,
- disk_type,
- summary.name,
- profile_id,
- adapter_type)
+ extra_config = self._get_extra_config(volume)
+ vm_create_spec = self.volumeops.get_create_spec(
+ volume['name'],
+ dummy_disk_size,
+ disk_type,
+ summary.name,
+ profileId=profile_id,
+ adapter_type=adapter_type,
+ extra_config=extra_config)
# convert vm_create_spec to vm_import_spec
cf = self.session.vim.client.factory
vm_import_spec = cf.create('ns0:VirtualMachineImportSpec')
profile_id = self._get_storage_profile_id(volume)
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
- vm_create_spec = self.volumeops.get_create_spec(name,
- 0,
- disk_type,
- summary.name,
- profile_id)
+ extra_config = self._get_extra_config(volume)
+ # We cannot determine the size of a virtual disk created from a
+ # streamOptimized disk image. Set the size to 0 and let vCenter
+ # figure it out after the virtual disk is created.
+ vm_create_spec = self.volumeops.get_create_spec(
+ name, 0, disk_type, summary.name, profileId=profile_id,
+ extra_config=extra_config)
vm_import_spec.configSpec = vm_create_spec
timeout = self.configuration.vmware_image_transfer_timeout_secs
# Pick a datastore, under any host, on which to create the full clone.
(host, rp, _folder, summary) = self._select_ds_for_volume(volume)
datastore = summary.datastore
+ extra_config = self._get_extra_config(volume)
clone = self.volumeops.clone_backing(volume['name'], backing,
snapshot, clone_type, datastore,
- host=host, resource_pool=rp)
+ host=host, resource_pool=rp,
+ extra_config=extra_config)
# If the volume size specified by the user is greater than the size of
# the source volume, the newly created volume initially gets only the
# source volume's capacity in the backend.
from oslo_vmware import exceptions
from oslo_vmware import pbm
from oslo_vmware import vim_util
+import six
from six.moves import urllib
from cinder.i18n import _, _LE, _LI
specs.append(controller_spec)
return specs
- def _get_create_spec_disk_less(self, name, ds_name, profileId=None):
+ def _get_extra_config_option_values(self, extra_config):
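+ """Build OptionValue objects from the given extra config dict."""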
+
+ cf = self._session.vim.client.factory
+ option_values = []
+
+ for key, value in six.iteritems(extra_config):
+ opt = cf.create('ns0:OptionValue')
+ opt.key = key
+ opt.value = value
+ option_values.append(opt)
+
+ return option_values
+
+ def _get_create_spec_disk_less(self, name, ds_name, profileId=None,
+ extra_config=None):
"""Return spec for creating disk-less backing.
:param name: Name of the backing
:param ds_name: Datastore name where the disk is to be provisioned
- :param profileId: storage profile ID for the backing
+ :param profileId: Storage profile ID for the backing
+ :param extra_config: Key-value pairs to be written to backing's
+ extra-config
:return: Spec for creation
"""
cf = self._session.vim.client.factory
vmProfile.profileId = profileId
create_spec.vmProfile = [vmProfile]
+ if extra_config:
+ create_spec.extraConfig = self._get_extra_config_option_values(
+ extra_config)
+
return create_spec
def get_create_spec(self, name, size_kb, disk_type, ds_name,
- profileId=None, adapter_type='lsiLogic'):
+ profileId=None, adapter_type='lsiLogic',
+ extra_config=None):
"""Return spec for creating backing with a single disk.
:param name: name of the backing
:param ds_name: datastore name where the disk is to be provisioned
:param profileId: storage profile ID for the backing
:param adapter_type: disk adapter type
+ :param extra_config: key-value pairs to be written to backing's
+ extra-config
:return: spec for creation
"""
- create_spec = self._get_create_spec_disk_less(name, ds_name, profileId)
+ create_spec = self._get_create_spec_disk_less(
+ name, ds_name, profileId=profileId, extra_config=extra_config)
create_spec.deviceChange = self._create_specs_for_disk_add(
size_kb, disk_type, adapter_type)
return create_spec
return backing
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
- host, ds_name, profileId=None, adapter_type='lsiLogic'):
+ host, ds_name, profileId=None, adapter_type='lsiLogic',
+ extra_config=None):
"""Create backing for the volume.
Creates a VM with one VMDK based on the given inputs.
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Datastore name where the disk is to be provisioned
- :param profileId: storage profile ID to be associated with backing
+ :param profileId: Storage profile ID to be associated with backing
:param adapter_type: Disk adapter type
+ :param extra_config: Key-value pairs to be written to backing's
+ extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating volume backing with name: %(name)s "
'ds_name': ds_name, 'profile': profileId, 'host': host,
'adapter_type': adapter_type})
- create_spec = self.get_create_spec(name, size_kb, disk_type, ds_name,
- profileId, adapter_type)
+ create_spec = self.get_create_spec(
+ name, size_kb, disk_type, ds_name, profileId=profileId,
+ adapter_type=adapter_type, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def create_backing_disk_less(self, name, folder, resource_pool,
- host, ds_name, profileId=None):
+ host, ds_name, profileId=None,
+ extra_config=None):
"""Create disk-less volume backing.
This type of backing is useful for creating volume from image. The
:param host: Host reference
:param ds_name: Name of the datastore used for VM storage
:param profileId: Storage profile ID to be associated with backing
+ :param extra_config: Key-value pairs to be written to backing's
+ extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating disk-less volume backing with name: %(name)s "
'resource_pool': resource_pool, 'host': host,
'ds_name': ds_name})
- create_spec = self._get_create_spec_disk_less(name, ds_name, profileId)
+ create_spec = self._get_create_spec_disk_less(
+ name, ds_name, profileId=profileId, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
- disk_type, host=None, resource_pool=None):
+ disk_type, host=None, resource_pool=None,
+ extra_config=None):
"""Get the clone spec.
:param datastore: Reference to datastore
:param disk_type: Disk type of clone
:param host: Target host
:param resource_pool: Target resource pool
+ :param extra_config: Key-value pairs to be written to backing's
+ extra-config
:return: Clone spec
"""
if disk_type is not None:
clone_spec.template = False
clone_spec.snapshot = snapshot
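+ # Copy the extra config to the clone through the clone spec's config
+ # field, which vCenter applies to the destination VM.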
+ if extra_config:
+ config_spec = cf.create('ns0:VirtualMachineConfigSpec')
+ config_spec.extraConfig = self._get_extra_config_option_values(
+ extra_config)
+ clone_spec.config = config_spec
+
LOG.debug("Spec for cloning the backing: %s.", clone_spec)
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
- disk_type=None, host=None, resource_pool=None):
+ disk_type=None, host=None, resource_pool=None,
+ extra_config=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
:param disk_type: Disk type of the clone
:param host: Target host
:param resource_pool: Target resource pool
+ :param extra_config: Key-value pairs to be written to backing's
+ extra-config
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
- clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot,
- backing, disk_type, host,
- resource_pool)
+ clone_spec = self._get_clone_spec(
+ datastore, disk_move_type, snapshot, backing, disk_type, host=host,
+ resource_pool=resource_pool, extra_config=extra_config)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)