self.url = None
self.user = None
self.passwd = None
- self.extraSpecs = {}
self.masking = emc_vmax_masking.EMCVMAXMasking(prtcl)
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
"""
volumeSize = int(self.utils.convert_gb_to_bits(volume['size']))
volumeName = volume['id']
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
rc, volumeDict, storageSystemName = (
- self._create_v3_volume(volume, volumeName, volumeSize))
+ self._create_v3_volume(volume, volumeName, volumeSize,
+ extraSpecs))
else:
rc, volumeDict, storageSystemName = (
- self._create_composite_volume(volume, volumeName, volumeSize))
+ self._create_composite_volume(volume, volumeName, volumeSize,
+ extraSpecs))
# If volume is created as part of a consistency group.
if 'consistencygroup_id' in volume and volume['consistencygroup_id']:
volumeInstance.path,
cgName,
volumeName,
- self.extraSpecs)
+ extraSpecs)
LOG.info(_LI("Leaving create_volume: %(volumeName)s "
"Return code: %(rc)lu "
:raises: VolumeBackendAPIException
"""
LOG.debug("Entering create_volume_from_snapshot.")
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
snapshotInstance = self._find_lun(snapshot)
storageSystem = snapshotInstance['SystemName']
data=exception_message)
self.provision.delete_clone_relationship(
- self.conn, repservice, syncName, self.extraSpecs)
+ self.conn, repservice, syncName, extraSpecs)
return self._create_cloned_volume(volume, snapshot, False)
self._delete_snapshot(snapshot)
def _remove_members(self, controllerConfigService,
- volumeInstance, connector):
+ volumeInstance, connector, extraSpecs):
"""This method unmaps a volume from a host.
Removes volume from the Device Masking Group that belongs to
ControllerConfigurationService
:param volumeInstance: volume Object
:param connector: the connector object
+ :param extraSpecs: extra specifications
:returns: storageGroupInstanceName
"""
volumeName = volumeInstance['ElementName']
LOG.debug("Detaching volume %s.", volumeName)
return self.masking.remove_and_reset_members(
self.conn, controllerConfigService, volumeInstance,
- volumeName, self.extraSpecs, connector)
+ volumeName, extraSpecs, connector)
def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host.
:param connector: the connector Object
:raises: VolumeBackendAPIException
"""
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
volumename = volume['name']
LOG.info(_LI("Unmap volume: %(volume)s."),
{'volume': volumename})
% {'storage_system': storage_system})
raise exception.VolumeBackendAPIException(data=exception_message)
- self._remove_members(configservice, vol_instance, connector)
+ self._remove_members(configservice, vol_instance, connector,
+ extraSpecs)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns device and connection info.
:returns: dict -- deviceInfoDict - device information dict
:raises: VolumeBackendAPIException
"""
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
volumeName = volume['name']
LOG.info(_LI("Initialize connection: %(volume)s."),
'deviceNumber': deviceNumber})
else:
deviceInfoDict = self._attach_volume(
- volume, connector, True)
+ volume, connector, extraSpecs, True)
else:
- deviceInfoDict = self._attach_volume(volume, connector)
+ deviceInfoDict = self._attach_volume(volume, connector,
+ extraSpecs)
return deviceInfoDict
- def _attach_volume(self, volume, connector, isLiveMigration=None):
+ def _attach_volume(self, volume, connector, extraSpecs,
+ isLiveMigration=None):
"""Attach a volume to a host.
If live migration is being undertaken then the volume
:params volume: the volume object
:params connector: the connector object
+ :param extraSpecs: extra specifications
:param isLiveMigration: boolean, can be None
:returns: dict -- deviceInfoDict
:raises: VolumeBackendAPIException
"""
volumeName = volume['name']
maskingViewDict = self._populate_masking_dict(
- volume, connector)
+ volume, connector, extraSpecs)
if isLiveMigration:
maskingViewDict['isLiveMigration'] = True
else:
maskingViewDict['isLiveMigration'] = False
rollbackDict = self.masking.setup_masking_view(
- self.conn, maskingViewDict, self.extraSpecs)
+ self.conn, maskingViewDict, extraSpecs)
# Find host lun id again after the volume is exported to the host.
deviceInfoDict = self.find_device_number(volume)
:params volume: the volume Object
:params connector: the connector Object
"""
- self.extraSpecs = self._initial_setup(volume)
-
volumename = volume['name']
LOG.info(_LI("Terminate connection: %(volume)s."),
{'volume': volumename})
- self.conn = self._get_ecom_connection()
self._unmap_lun(volume, connector)
def extend_volume(self, volume, newSize):
"""
originalVolumeSize = volume['size']
volumeName = volume['name']
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
# This is V2
rc, modifiedVolumeDict = self._extend_composite_volume(
- volumeInstance, volumeName, newSize, additionalVolumeSize)
+ volumeInstance, volumeName, newSize, additionalVolumeSize,
+ extraSpecs)
# Check the occupied space of the new extended volume.
extendedVolumeInstance = self.utils.find_volume_instance(
LOG.info(_LI("Migrating using retype Volume: %(volume)s."),
{'volume': volumeName})
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
{'name': volumeName})
return False
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
return self._slo_workload_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
- new_type)
+ new_type, extraSpecs)
else:
return self._pool_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
- self.extraSpecs[FASTPOLICY], new_type)
+ extraSpecs[FASTPOLICY],
+ new_type, extraSpecs)
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Migrate volume to another host.
def _migrate_volume(
self, volume, volumeInstance, targetPoolName,
- targetFastPolicyName, sourceFastPolicyName, new_type=None):
+ targetFastPolicyName, sourceFastPolicyName, extraSpecs,
+ new_type=None):
"""Migrate volume to another host.
:param volume: the volume object including the volume_type_id
:param targetPoolName: the target poolName
:param targetFastPolicyName: the target FAST policy name, can be None
:param sourceFastPolicyName: the source FAST policy name, can be None
+ :param extraSpecs: extra specifications
:param new_type: None
:returns: boolean -- True/False
:returns: list -- empty list
self.conn, volumeInstance.path)
moved, rc = self._migrate_volume_from(
- volume, volumeInstance, targetPoolName, sourceFastPolicyName)
+ volume, volumeInstance, targetPoolName, sourceFastPolicyName,
+ extraSpecs)
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
self.conn, volumeInstance.path):
self._migrate_cleanup(self.conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
- volumeName)
+ volumeName, extraSpecs)
else:
# Migrate was successful but still issues.
self._migrate_rollback(
self.conn, volumeInstance, storageSystemName,
- sourceFastPolicyName, volumeName, sourcePoolInstanceName)
+ sourceFastPolicyName, volumeName, sourcePoolInstanceName,
+ extraSpecs)
return moved
if moved is True and targetFastPolicyName is not None:
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
- targetFastPolicyName, volumeName):
+ targetFastPolicyName, volumeName, extraSpecs):
LOG.warn(_LW(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
'sourcePoolInstanceName': sourcePoolInstanceName})
self._migrate_rollback(
self.conn, volumeInstance, storageSystemName,
- sourceFastPolicyName, volumeName, sourcePoolInstanceName)
+ sourceFastPolicyName, volumeName, sourcePoolInstanceName,
+ extraSpecs)
if rc == 0:
moved = True
def _migrate_rollback(self, conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
- volumeName, sourcePoolInstanceName):
+ volumeName, sourcePoolInstanceName, extraSpecs):
"""Full rollback.
Failed on final step on adding migrated volume to new target
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param sourcePoolInstanceName: the instance name of the source pool
+ :param extraSpecs: extra specifications
"""
LOG.warn(_LW("_migrate_rollback on : %(volumeName)s."),
try:
self.provision.migrate_volume_to_storage_pool(
conn, storageRelocationService, volumeInstance.path,
- sourcePoolInstanceName, self.extraSpecs)
+ sourcePoolInstanceName, extraSpecs)
except Exception:
LOG.error(_LE(
"Failed to return volume %(volumeName)s to "
if sourceFastPolicyName is not None:
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
- volumeName)
+ volumeName, extraSpecs)
def _migrate_cleanup(self, conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
- volumeName):
+ volumeName, extraSpecs):
"""If the migrate fails, put volume back to source FAST SG.
:param conn: connection info to ECOM
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
+ :param extraSpecs: extra specifications
"""
LOG.warn(_LW("_migrate_cleanup on : %(volumeName)s."),
if assocStorageGroupInstanceName is None:
self.add_to_default_SG(conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
- volumeName)
+ volumeName, extraSpecs)
# It is in the incorrect storage group.
if (assocStorageGroupInstanceName is not None and
self.provision.remove_device_from_storage_group(
conn, controllerConfigurationService,
assocStorageGroupInstanceName,
- volumeInstance.path, volumeName, self.extraSpecs)
+ volumeInstance.path, volumeName, extraSpecs)
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
- volumeName)
+ volumeName, extraSpecs)
def _migrate_volume_fast_target(
self, volumeInstance, storageSystemName,
- targetFastPolicyName, volumeName):
+ targetFastPolicyName, volumeName, extraSpecs):
"""If the target host is FAST enabled.
If the target host is FAST enabled then we need to add it to the
:param storageSystemName: the storage system name
:param targetFastPolicyName: the target fast policy name
:param volumeName: the volume name
+ :param extraSpecs: extra specifications
:returns: boolean -- True/False
"""
falseRet = False
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigurationService,
- targetFastPolicyName, volumeInstance, self.extraSpecs))
+ targetFastPolicyName, volumeInstance, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Unable to create or get default storage group for FAST policy"
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
- volumeName, targetFastPolicyName, self.extraSpecs))
+ volumeName, targetFastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Failed to verify that volume was added to storage group for "
return True
def _migrate_volume_from(self, volume, volumeInstance,
- targetPoolName, sourceFastPolicyName):
+ targetPoolName, sourceFastPolicyName,
+ extraSpecs):
"""Check FAST policies and migrate from source pool.
:param volume: the volume object including the volume_type_id
:param volumeInstance: the volume instance
:param targetPoolName: the target poolName
:param sourceFastPolicyName: the source FAST policy name, can be None
+ :param extraSpecs: extra specifications
:returns: boolean -- True/False
:returns: int -- the return code from migrate operation
"""
if sourceFastPolicyName is not None:
self.remove_from_default_SG(
self.conn, volumeInstance, storageSystemName,
- sourceFastPolicyName, volumeName)
+ sourceFastPolicyName, volumeName, extraSpecs)
# Migrate from one pool to another.
storageRelocationService = self.utils.find_storage_relocation_service(
try:
rc = self.provision.migrate_volume_to_storage_pool(
self.conn, storageRelocationService, volumeInstance.path,
- targetPoolInstanceName, self.extraSpecs)
+ targetPoolInstanceName, extraSpecs)
except Exception as e:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
LOG.debug("Terminating migration session on: %(volumeName)s.",
{'volumeName': volumeName})
self.provision._terminate_migrate_session(
- self.conn, volumeInstance.path, self.extraSpecs)
+ self.conn, volumeInstance.path, extraSpecs)
if rc == 0:
moved = True
def remove_from_default_SG(
self, conn, volumeInstance, storageSystemName,
- sourceFastPolicyName, volumeName):
+ sourceFastPolicyName, volumeName, extraSpecs):
"""For FAST, remove volume from default storage group.
:param conn: connection info to ECOM
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
+ :param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
controllerConfigurationService = (
self.masking.remove_device_from_default_storage_group(
conn, controllerConfigurationService,
volumeInstance.path, volumeName, sourceFastPolicyName,
- self.extraSpecs))
+ extraSpecs))
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
exceptionMessage = (_(
def add_to_default_SG(
self, conn, volumeInstance, storageSystemName,
- targetFastPolicyName, volumeName):
+ targetFastPolicyName, volumeName, extraSpecs):
"""For FAST, add volume to default storage group.
:param conn: connection info to ECOM
:param storageSystemName: the storage system name
:param targetFastPolicyName: the target FAST policy name
:param volumeName: the volume Name
+ :param extraSpecs: extra specifications
"""
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigurationService, volumeInstance,
- volumeName, targetFastPolicyName, self.extraSpecs))
+ volumeName, targetFastPolicyName, extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to add %(volumeName)s "
:raises: VolumeBackendAPIException
"""
try:
- self.extraSpecs, configurationFile = (
+ extraSpecs, configurationFile = (
self._set_config_file_and_get_extra_specs(
volume, volumeTypeId))
isV3 = self.utils.isArrayV3(self.conn, arrayName)
if isV3:
- self.extraSpecs = self._set_v3_extra_specs(
- configurationFile, arrayName)
+ extraSpecs = self._set_v3_extra_specs(
+ configurationFile, arrayName, extraSpecs)
else:
# V2 extra specs.
- self.extraSpecs = self._set_v2_extra_specs(
- configurationFile, arrayName)
+ extraSpecs = self._set_v2_extra_specs(
+ configurationFile, arrayName, extraSpecs)
except Exception:
exceptionMessage = (_(
"Unable to get configuration information necessary to create "
"/etc/cinder/cinder_emc_config_<CONFIG_GROUP>.xml."))
raise exception.VolumeBackendAPIException(data=exceptionMessage)
- return self.extraSpecs
+ return extraSpecs
- def _get_pool_and_storage_system(self):
+ def _get_pool_and_storage_system(self, extraSpecs):
"""Given the extra specs get the pool and storage system name.
+ :param extraSpecs: extra specifications
:returns: poolInstanceName The pool instance name
:returns: string -- the storage system name
:raises: VolumeBackendAPIException
"""
try:
- array = self.extraSpecs[ARRAY]
+ array = extraSpecs[ARRAY]
poolInstanceName, storageSystemStr = self._find_pool_in_array(
- array, self.extraSpecs[POOL], self.extraSpecs[ISV3])
+ array, extraSpecs[POOL], extraSpecs[ISV3])
except Exception:
exceptionMessage = (_(
"You must supply an array in your EMC configuration file."))
return poolInstanceName, storageSystemStr
- def _populate_masking_dict(self, volume, connector):
+ def _populate_masking_dict(self, volume, connector, extraSpecs):
"""Get all the names of the maskingView and subComponents.
:param volume: the volume object
:param connector: the connector object
+ :param extraSpecs: extra specifications
:returns: dict -- a dictionary with masking view information
"""
maskingViewDict = {}
hostName = connector['host']
- poolName = self.extraSpecs[POOL]
- isV3 = self.extraSpecs[ISV3]
+ poolName = extraSpecs[POOL]
+ isV3 = extraSpecs[ISV3]
maskingViewDict['isV3'] = isV3
protocol = self.utils.get_short_protocol_type(self.protocol)
shortHostName = self.utils.get_host_short_name(hostName)
if isV3:
- slo = self.extraSpecs[SLO]
- workload = self.extraSpecs[WORKLOAD]
+ slo = extraSpecs[SLO]
+ workload = extraSpecs[WORKLOAD]
maskingViewDict['slo'] = slo
maskingViewDict['workload'] = workload
maskingViewDict['pool'] = poolName
def _add_volume_to_default_storage_group_on_create(
self, volumeDict, volumeName, storageConfigService,
- storageSystemName, fastPolicyName):
+ storageSystemName, fastPolicyName, extraSpecs):
"""Add the volume to the default storage group for that policy.
On a create when fast policy is enable add the volume to the default
:param storageConfigService: the storage configuration service
:param storageSystemName: the storage system name (String)
:param fastPolicyName: the fast policy name (String)
+ :param extraSpecs: extra specifications
:returns: dict -- maskingViewDict with masking view information
:raises: VolumeBackendAPIException
"""
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
- volumeName, fastPolicyName, self.extraSpecs)
+ volumeName, fastPolicyName, extraSpecs)
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path))
LOG.error(errorMessage)
self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
- volumeName, self.extraSpecs)
+ volumeName, extraSpecs)
raise exception.VolumeBackendAPIException(data=errorMessage)
def _create_and_get_unbound_volume(
self, conn, storageConfigService, compositeVolumeInstanceName,
- additionalSize):
+ additionalSize, extraSpecs):
"""Create an unbound volume.
Create an unbound volume so it is in the correct state to add to a
:param storageConfigService: the storage config service instance name
:param compositeVolumeInstanceName: the composite volume instance name
:param additionalSize: the size you want to increase the volume by
+ :param extraSpecs: extra specifications
:returns: volume instance modifiedCompositeVolumeInstance
"""
assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
conn, compositeVolumeInstanceName)
appendVolumeInstance = self._create_and_get_volume_instance(
conn, storageConfigService, assocPoolInstanceName, 'appendVolume',
- additionalSize)
+ additionalSize, extraSpecs)
isVolumeBound = self.utils.is_volume_bound_to_pool(
conn, appendVolumeInstance)
appendVolumeInstance = (
self._unbind_and_get_volume_from_storage_pool(
conn, storageConfigService, assocPoolInstanceName,
- appendVolumeInstance.path, 'appendVolume'))
+ appendVolumeInstance.path, 'appendVolume', extraSpecs))
return appendVolumeInstance
def _create_and_get_volume_instance(
self, conn, storageConfigService, poolInstanceName,
- volumeName, volumeSize):
+ volumeName, volumeSize, extraSpecs):
"""Create and get a new volume.
- :params conn: the connection information to the ecom server
- :params storageConfigService: the storage config service instance name
- :params poolInstanceName: the pool instance name
- :params volumeName: the volume name
- :params volumeSize: the size to create the volume
+ :param conn: the connection information to the ecom server
+ :param storageConfigService: the storage config service instance name
+ :param poolInstanceName: the pool instance name
+ :param volumeName: the volume name
+ :param volumeSize: the size to create the volume
+ :param extraSpecs: extra specifications
:returns: volumeInstance -- the volume instance
"""
volumeDict, _ = (
self.provision.create_volume_from_pool(
self.conn, storageConfigService, volumeName, poolInstanceName,
- volumeSize, self.extraSpecs))
+ volumeSize, extraSpecs))
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
return volumeInstance
def _unbind_and_get_volume_from_storage_pool(
self, conn, storageConfigService, poolInstanceName,
- volumeInstanceName, volumeName):
+ volumeInstanceName, volumeName, extraSpecs):
"""Unbind a volume from a pool and return the unbound volume.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance name
:param volumeInstanceName: the volume instance name
:param volumeName: string the volumeName
+ :param extraSpecs: extra specifications
:returns: unboundVolumeInstance -- the unbound volume instance
"""
self.provision.unbind_volume_from_storage_pool(
conn, storageConfigService, poolInstanceName,
volumeInstanceName,
- volumeName, self.extraSpecs))
+ volumeName, extraSpecs))
volumeDict = self.provision.get_volume_dict_from_job(conn, job['Job'])
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
def _modify_and_get_composite_volume_instance(
self, conn, elementCompositionServiceInstanceName, volumeInstance,
- appendVolumeInstanceName, volumeName, compositeType):
+ appendVolumeInstanceName, volumeName, compositeType, extraSpecs):
"""Given an existing composite volume add a new composite volume to it.
:param conn: the connection information to the ecom server
:param appendVolumeInstanceName: the appended volume instance name
:param volumeName: the volume name
:param compositeType: concatenated
+ :param extraSpecs: extra specifications
:returns: int -- the return code
:returns: dict -- modifiedVolumeDict - the modified volume dict
"""
if 'True' in isComposite:
rc, job = self.provision.modify_composite_volume(
conn, elementCompositionServiceInstanceName,
- volumeInstance.path, appendVolumeInstanceName, self.extraSpecs)
+ volumeInstance.path, appendVolumeInstanceName, extraSpecs)
elif 'False' in isComposite:
rc, job = self.provision.create_new_composite_volume(
conn, elementCompositionServiceInstanceName,
volumeInstance.path, appendVolumeInstanceName, compositeType,
- self.extraSpecs)
+ extraSpecs)
else:
LOG.error(_LE(
"Unable to determine whether %(volumeName)s is "
def _get_or_create_default_storage_group(
self, conn, storageSystemName, volumeDict, volumeName,
- fastPolicyName):
+ fastPolicyName, extraSpecs):
"""Get or create a default storage group for a fast policy.
:param conn: the connection information to the ecom server
:param volumeDict: the volume dictionary
:param volumeName: the volume name
:param fastPolicyName: the fast policy name
+ :param extraSpecs: extra specifications
:returns: defaultStorageGroupInstanceName
"""
controllerConfigService = (
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigService, fastPolicyName,
- volumeInstance, self.extraSpecs))
+ volumeInstance, extraSpecs))
return defaultStorageGroupInstanceName
def _create_cloned_volume(
:param isSnapshot: boolean -- Defaults to False
:returns: dict -- cloneDict the cloned volume dictionary
"""
- self.extraSpecs = self._initial_setup(cloneVolume)
+ extraSpecs = self._initial_setup(cloneVolume)
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
'elementname': cloneName,
'sourceelement': sourceInstance.path})
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
rc, cloneDict = self._create_replica_v3(repServiceInstanceName,
cloneVolume,
sourceVolume,
sourceInstance,
- isSnapshot)
+ isSnapshot,
+ extraSpecs)
else:
rc, cloneDict = self._create_clone_v2(repServiceInstanceName,
cloneVolume,
sourceVolume,
sourceInstance,
- isSnapshot)
+ isSnapshot,
+ extraSpecs)
LOG.debug("Leaving _create_cloned_volume: Volume: "
"%(cloneName)s Source Volume: %(sourceName)s "
"Return code: %(rc)lu.",
return cloneDict
def _add_clone_to_default_storage_group(
- self, fastPolicyName, storageSystemName, cloneDict, cloneName):
+ self, fastPolicyName, storageSystemName, cloneDict, cloneName,
+ extraSpecs):
"""Helper function to add clone to the default storage group.
:param fastPolicyName: the fast policy name
:param storageSystemName: the storage system name
:param cloneDict: clone dictionary
:param cloneName: clone name
+ :param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
# Check if the clone/snapshot volume already part of the default sg.
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, cloneDict, cloneName,
- fastPolicyName))
+ fastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
exceptionMessage = (_(
"Unable to create or get default storage group for FAST "
self._add_volume_to_default_storage_group_on_create(
cloneDict, cloneName, storageConfigService, storageSystemName,
- fastPolicyName)
+ fastPolicyName, extraSpecs)
def _delete_volume(self, volume):
"""Helper function to delete the specified volume.
rc = -1
errorRet = (rc, volumeName)
- self.extraSpecs = self._initial_setup(volume)
+ extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
deviceId = volumeInstance['DeviceID']
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
storageGroupName = self.utils.get_v3_storage_group_name(
- self.extraSpecs[POOL], self.extraSpecs[SLO],
- self.extraSpecs[WORKLOAD])
+ extraSpecs[POOL], extraSpecs[SLO],
+ extraSpecs[WORKLOAD])
rc = self._delete_from_pool_v3(
storageConfigService, volumeInstance, volumeName,
- deviceId, storageGroupName)
+ deviceId, storageGroupName, extraSpecs)
else:
rc = self._delete_from_pool(storageConfigService, volumeInstance,
volumeName, deviceId,
- self.extraSpecs[FASTPOLICY])
+ extraSpecs[FASTPOLICY],
+ extraSpecs)
return (rc, volumeName)
def _remove_device_from_storage_group(
self, controllerConfigurationService, volumeInstanceName,
- volumeName):
+ volumeName, extraSpecs):
"""Check is volume is part of a storage group prior to delete.
Log a warning if volume is part of storage group.
:param controllerConfigurationService: controller configuration service
:param volumeInstanceName: volume instance name
:param volumeName: volume name (string)
+ :param extraSpecs: extra specifications
"""
storageGroupInstanceNames = (
self.masking.get_associated_masking_groups_from_device(
self.provision.remove_device_from_storage_group(
self.conn, controllerConfigurationService,
storageGroupInstanceName,
- volumeInstanceName, volumeName, self.extraSpecs)
+ volumeInstanceName, volumeName, extraSpecs)
def _find_lunmasking_scsi_protocol_controller(self, storageSystemName,
connector):
LOG.info(_LI("Delete Snapshot: %(snapshot)s."),
{'snapshot': snapshotname})
- self.extraSpecs = self._initial_setup(snapshot)
+ extraSpecs = self._initial_setup(snapshot)
self.conn = self._get_ecom_connection()
- if not self.extraSpecs[ISV3]:
+ if not extraSpecs[ISV3]:
snapshotInstance = self._find_lun(snapshot)
storageSystem = snapshotInstance['SystemName']
'syncName': syncName})
self.provision.delete_clone_relationship(
- self.conn, repservice, syncName, self.extraSpecs, True)
+ self.conn, repservice, syncName, extraSpecs, True)
# Delete the target device.
self._delete_volume(snapshot)
cgName = self.utils.truncate_string(group['id'], 8)
- self.extraSpecs = self._initial_setup(None, volumeTypeId)
+ extraSpecs = self._initial_setup(None, volumeTypeId)
_, storageSystem = (
- self._get_pool_and_storage_system())
+ self._get_pool_and_storage_system(extraSpecs))
self.conn = self._get_ecom_connection()
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
self.provision.create_consistency_group(
- self.conn, replicationService, cgName, self.extraSpecs)
+ self.conn, replicationService, cgName, extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to create consistency group:"
modelUpdate['status'] = group['status']
volumeTypeId = group['volume_type_id'].replace(",", "")
- self.extraSpecs = self._initial_setup(None, volumeTypeId)
+ extraSpecs = self._initial_setup(None, volumeTypeId)
- _, storageSystem = (
- self._get_pool_and_storage_system())
+ __, storageSystem = (
+ self._get_pool_and_storage_system(extraSpecs))
try:
replicationService = self.utils.find_replication_service(
self.provision.delete_consistency_group(self.conn,
replicationService,
cgInstanceName, cgName,
- self.extraSpecs)
+ extraSpecs)
# Do a bulk delete, a lot faster than single deletes.
if memberInstanceNames:
volumes, modelUpdate = self._do_bulk_delete(
storageSystem, memberInstanceNames, storageConfigservice,
- volumes, modelUpdate, self.extraSpecs[ISV3])
+ volumes, modelUpdate, extraSpecs[ISV3], extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
return modelUpdate, volumes
def _do_bulk_delete(self, storageSystem, memberInstanceNames,
- storageConfigservice, volumes, modelUpdate, isV3):
+ storageConfigservice, volumes, modelUpdate, isV3,
+ extraSpecs):
"""Do a bulk delete.
:param storageSystem: storage system name
:param volumes: volume objects
:param modelUpdate: dict
:param isV3: boolean
+ :param extraSpecs: extra specifications
:returns: list -- list of volume objects
:returns: dict -- modelUpdate
"""
for memberInstanceName in memberInstanceNames:
self._remove_device_from_storage_group(
controllerConfigurationService, memberInstanceName,
- 'Member Volume')
+ 'Member Volume', extraSpecs)
if isV3:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None, self.extraSpecs)
+ memberInstanceNames, None, extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None, self.extraSpecs)
+ memberInstanceNames, None, extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception:
modelUpdate = {'status': 'available'}
volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
- self.extraSpecs = self._initial_setup(None, volumeTypeId)
+ extraSpecs = self._initial_setup(None, volumeTypeId)
self.conn = self._get_ecom_connection()
_, storageSystem = (
- self._get_pool_and_storage_system())
+ self._get_pool_and_storage_system(extraSpecs))
try:
replicationService = self.utils.find_replication_service(
# Create the target consistency group.
targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
self.provision.create_consistency_group(
- self.conn, replicationService, targetCgName, self.extraSpecs)
+ self.conn, replicationService, targetCgName, extraSpecs)
targetCgInstanceName = self._find_consistency_group(
replicationService, targetCgName)
LOG.info(_LI("Create target consistency group %(targetCg)s."),
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
_, volumeDict, _ = (
self._create_v3_volume(
- volume, targetVolumeName, volumeSizeInbits))
+ volume, targetVolumeName, volumeSizeInbits,
+ extraSpecs))
else:
_, volumeDict, _ = (
self._create_composite_volume(
- volume, targetVolumeName, volumeSizeInbits))
+ volume, targetVolumeName, volumeSizeInbits,
+ extraSpecs))
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetVolumeName)
LOG.debug("Create target volume for member volume "
targetVolumeInstance.path,
targetCgName,
targetVolumeName,
- self.extraSpecs)
+ extraSpecs)
# Less than 5 characters relationship name.
relationName = self.utils.truncate_string(cgsnapshot['id'], 5)
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
self.provisionv3.create_group_replica(
self.conn, replicationService, cgInstanceName,
- targetCgInstanceName, relationName, self.extraSpecs)
+ targetCgInstanceName, relationName, extraSpecs)
else:
self.provision.create_group_replica(
self.conn, replicationService, cgInstanceName,
- targetCgInstanceName, relationName, self.extraSpecs)
+ targetCgInstanceName, relationName, extraSpecs)
# Break the replica group relationship.
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
self.conn, storageSystem, targetCgInstanceName, True)
storageSystem)
raise exception.VolumeBackendAPIException(
data=exception_message)
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
# Operation 7: dissolve for snapVx.
operation = self.utils.get_num(9, '16')
self.provisionv3.break_replication_relationship(
self.conn, repservice, rgSyncInstanceName, operation,
- self.extraSpecs)
+ extraSpecs)
else:
self.provision.delete_clone_relationship(self.conn, repservice,
rgSyncInstanceName,
- self.extraSpecs)
+ extraSpecs)
except Exception as ex:
modelUpdate['status'] = 'error'
modelUpdate = {'status': 'deleted'}
volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
- self.extraSpecs = self._initial_setup(None, volumeTypeId)
+ extraSpecs = self._initial_setup(None, volumeTypeId)
self.conn = self._get_ecom_connection()
- _, storageSystem = (
- self._get_pool_and_storage_system())
+ __, storageSystem = (
+ self._get_pool_and_storage_system(extraSpecs))
try:
targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
modelUpdate, snapshots = self._delete_cg_and_members(
storageSystem, targetCgName, modelUpdate,
- snapshots)
+ snapshots, extraSpecs)
except Exception as ex:
modelUpdate['status'] = 'error_deleting'
self.utils.populate_cgsnapshot_status(
return memberInstanceNames
def _create_composite_volume(
- self, volume, volumeName, volumeSize):
+ self, volume, volumeName, volumeSize, extraSpecs):
"""Create a composite volume (V2).
:param volume: the volume object
:param volumeName: the name of the volume
:param volumeSize: the size of the volume
+ :param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- volumeDict
:returns: string -- storageSystemName
:raises: VolumeBackendAPIException
"""
memberCount, errorDesc = self.utils.determine_member_count(
- volume['size'], self.extraSpecs[MEMBERCOUNT],
- self.extraSpecs[COMPOSITETYPE])
+ volume['size'], extraSpecs[MEMBERCOUNT],
+ extraSpecs[COMPOSITETYPE])
if errorDesc is not None:
exceptionMessage = (_("The striped meta count of %(memberCount)s "
"is too small for volume: %(volumeName)s "
raise exception.VolumeBackendAPIException(data=exceptionMessage)
poolInstanceName, storageSystemName = (
- self._get_pool_and_storage_system())
+ self._get_pool_and_storage_system(extraSpecs))
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
# If FAST is intended to be used we must first check that the pool
# is associated with the correct storage tier.
- if self.extraSpecs[FASTPOLICY] is not None:
+ if extraSpecs[FASTPOLICY] is not None:
foundPoolInstanceName = self.fast.get_pool_associated_to_policy(
- self.conn, self.extraSpecs[FASTPOLICY], self.extraSpecs[ARRAY],
+ self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
storageConfigService, poolInstanceName)
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool: %(poolName)s. "
"is not associated to storage tier for "
"fast policy %(fastPolicy)s.")
- % {'poolName': self.extraSpecs[POOL],
+ % {'poolName': extraSpecs[POOL],
'fastPolicy':
- self.extraSpecs[FASTPOLICY]})
+ extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
compositeType = self.utils.get_composite_type(
- self.extraSpecs[COMPOSITETYPE])
+ extraSpecs[COMPOSITETYPE])
volumeDict, rc = self.provision.create_composite_volume(
self.conn, elementCompositionService, volumeSize, volumeName,
- poolInstanceName, compositeType, memberCount, self.extraSpecs)
+ poolInstanceName, compositeType, memberCount, extraSpecs)
# Now that we have already checked that the pool is associated with
# the correct storage tier and the volume was successfully created
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy.
- if self.extraSpecs[FASTPOLICY]:
+ if extraSpecs[FASTPOLICY]:
LOG.info(_LI(
"Adding volume: %(volumeName)s to default storage group"
" for FAST policy: %(fastPolicyName)s."),
{'volumeName': volumeName,
- 'fastPolicyName': self.extraSpecs[FASTPOLICY]})
+ 'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, volumeDict,
- volumeName, self.extraSpecs[FASTPOLICY]))
+ volumeName, extraSpecs[FASTPOLICY], extraSpecs))
if not defaultStorageGroupInstanceName:
exceptionMessage = (_(
"Unable to create or get default storage group for "
"FAST policy: %(fastPolicyName)s.")
- % {'fastPolicyName': self.extraSpecs[FASTPOLICY]})
+ % {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._add_volume_to_default_storage_group_on_create(
volumeDict, volumeName, storageConfigService,
- storageSystemName, self.extraSpecs[FASTPOLICY])
+ storageSystemName, extraSpecs[FASTPOLICY], extraSpecs)
return rc, volumeDict, storageSystemName
def _create_v3_volume(
- self, volume, volumeName, volumeSize):
+ self, volume, volumeName, volumeSize, extraSpecs):
"""Create a volume (V3).
:param volume: the volume object
:param volumeName: the volume name
:param volumeSize: the volume size
+ :param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- volumeDict
:returns: string -- storageSystemName
:raises: VolumeBackendAPIException
"""
isValidSLO, isValidWorkload = self.utils.verify_slo_workload(
- self.extraSpecs[SLO], self.extraSpecs[WORKLOAD])
+ extraSpecs[SLO], extraSpecs[WORKLOAD])
if not isValidSLO or not isValidWorkload:
exceptionMessage = (_(
"Either SLO: %(slo)s or workload %(workload)s is invalid. "
"Examine previous error statement for valid values.")
- % {'slo': self.extraSpecs[SLO],
- 'workload': self.extraSpecs[WORKLOAD]})
+ % {'slo': extraSpecs[SLO],
+ 'workload': extraSpecs[WORKLOAD]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
poolInstanceName, storageSystemName = (
- self._get_pool_and_storage_system())
+ self._get_pool_and_storage_system(extraSpecs))
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
maximumVolumeSize, minimumVolumeSize = (
self.provisionv3.get_volume_range(
self.conn, storageConfigService, poolInstanceName,
- self.extraSpecs[SLO], self.extraSpecs[WORKLOAD],
- self.extraSpecs))
+ extraSpecs[SLO], extraSpecs[WORKLOAD],
+ extraSpecs))
if not self.utils.is_in_range(
volumeSize, maximumVolumeSize, minimumVolumeSize):
LOG.warn(_LW(
'volumeSize': volumeSize,
'minimumVolumeSize': minimumVolumeSize,
'maximumVolumeSize': maximumVolumeSize,
- 'slo': self.extraSpecs[SLO],
- 'workload': self.extraSpecs[WORKLOAD]
+ 'slo': extraSpecs[SLO],
+ 'workload': extraSpecs[WORKLOAD]
})
# A volume created without specifying a storage group during
# creation time is allocated from the default SRP pool and
# assigned the optimized SLO.
sgInstanceName = self._get_or_create_storage_group_v3(
- self.extraSpecs[POOL], self.extraSpecs[SLO],
- self.extraSpecs[WORKLOAD], storageSystemName)
+ extraSpecs[POOL], extraSpecs[SLO],
+ extraSpecs[WORKLOAD], storageSystemName, extraSpecs)
volumeDict, rc = self.provisionv3.create_volume_from_sg(
self.conn, storageConfigService, volumeName,
- sgInstanceName, volumeSize, self.extraSpecs)
+ sgInstanceName, volumeSize, extraSpecs)
return rc, volumeDict, storageSystemName
def _get_or_create_storage_group_v3(
- self, poolName, slo, workload, storageSystemName):
+ self, poolName, slo, workload, storageSystemName, extraSpecs):
"""Get or create storage group_v3 (V3).
- :param poolName: the SRP pool nsmr
+ :param poolName: the SRP pool name
:param slo: the SLO
:param workload: the workload
:param storageSystemName: storage system name
+ :param extraSpecs: extra specifications
:returns: sgInstanceName
"""
storageGroupName = self.utils.get_v3_storage_group_name(
if sgInstanceName is None:
sgInstanceName = self.provisionv3.create_storage_group_v3(
self.conn, controllerConfigService, storageGroupName,
- poolName, slo, workload, self.extraSpecs)
+ poolName, slo, workload, extraSpecs)
return sgInstanceName
def _extend_composite_volume(self, volumeInstance, volumeName,
- newSize, additionalVolumeSize):
+ newSize, additionalVolumeSize, extraSpecs):
"""Extend a composite volume (V2).
:param volumeInstance: the volume instance
:param volumeName: the name of the volume
:param newSize: in GBs
:param additionalVolumeSize: additional volume size
+ :param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- modifiedVolumeDict
:raises: VolumeBackendAPIException
# newSize - oldSize = additionalVolumeSize.
unboundVolumeInstance = self._create_and_get_unbound_volume(
self.conn, storageConfigService, volumeInstance.path,
- additionalVolumeSize)
+ additionalVolumeSize, extraSpecs)
if unboundVolumeInstance is None:
exceptionMessage = (_(
"Error Creating unbound volume on an Extend operation."))
rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn, elementCompositionService, volumeInstance,
- unboundVolumeInstance.path, volumeName, compositeType))
+ unboundVolumeInstance.path, volumeName, compositeType,
+ extraSpecs))
if modifiedVolumeDict is None:
exceptionMessage = (_(
"On an Extend Operation, error adding volume to composite "
return rc, modifiedVolumeDict
def _slo_workload_migration(self, volumeInstance, volume, host,
- volumeName, volumeStatus, newType):
+ volumeName, volumeStatus, newType,
+ extraSpecs):
"""Migrate from SLO/Workload combination to another (V3).
:param volumeInstance: the volume instance
:param volumeName: the name of the volume
:param volumeStatus: the volume status
:param newType: the type to migrate to
+ :param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
volumeInstanceName = volumeInstance.path
isValid, targetSlo, targetWorkload = (
self._is_valid_for_storage_assisted_migration_v3(
- volumeInstanceName, host, self.extraSpecs[ARRAY],
- self.extraSpecs[POOL], volumeName, volumeStatus))
+ volumeInstanceName, host, extraSpecs[ARRAY],
+ extraSpecs[POOL], volumeName, volumeStatus))
storageSystemName = volumeInstance['SystemName']
'sourceHost': volume['host'],
'targetHost': host['host']})
return self._migrate_volume_v3(
- volume, volumeInstance, self.extraSpecs[POOL], targetSlo,
- targetWorkload, storageSystemName, newType)
+ volume, volumeInstance, extraSpecs[POOL], targetSlo,
+ targetWorkload, storageSystemName, newType, extraSpecs)
return False
def _migrate_volume_v3(
self, volume, volumeInstance, poolName, targetSlo,
- targetWorkload, storageSystemName, newType):
+ targetWorkload, storageSystemName, newType, extraSpecs):
"""Migrate from one slo/workload combination to another (V3).
This requires moving the volume from it's current SG to a
:param targetWorkload: the target workload
:param storageSystemName: the storage system name
:param newType: the type to migrate to
+ :param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
volumeName = volume['name']
controllerConfigService,
foundStorageGroupInstanceName,
volumeInstance.path,
- volumeName, self.extraSpecs)
+ volumeName, extraSpecs)
# Check that it has been removed.
sgFromVolRemovedInstanceName = (
self.utils.wrap_get_storage_group_from_volume(
poolName, targetSlo, targetWorkload)
targetSgInstanceName = self._get_or_create_storage_group_v3(
- poolName, targetSlo, targetWorkload, storageSystemName)
+ poolName, targetSlo, targetWorkload, storageSystemName,
+ extraSpecs)
if targetSgInstanceName is None:
LOG.error(_LE(
"Failed to get or create storage group %(storageGroupName)s."),
self.masking.add_volume_to_storage_group(
self.conn, controllerConfigService, targetSgInstanceName,
- volumeInstance, volumeName, storageGroupName, self.extraSpecs)
+ volumeInstance, volumeName, storageGroupName, extraSpecs)
# Check that it has been added.
sgFromVolAddedInstanceName = (
self.utils.get_storage_group_from_volume(
def _pool_migration(self, volumeInstance, volume, host,
volumeName, volumeStatus,
- fastPolicyName, newType):
+ fastPolicyName, newType, extraSpecs):
"""Migrate from one pool to another (V2).
:param volumeInstance: the volume instance
:param volumeStatus: the volume status
:param fastPolicyName: the FAST policy Name
:param newType: the type to migrate to
+ :param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
storageSystemName = volumeInstance['SystemName']
'targetHost': host['host']})
return self._migrate_volume(
volume, volumeInstance, targetPoolName, targetFastPolicyName,
- fastPolicyName, newType)
+ fastPolicyName, extraSpecs, newType)
return False
return location_info, total_capacity_gb, free_capacity_gb
- def _set_v2_extra_specs(self, configurationFile, arrayName):
+ def _set_v2_extra_specs(self, configurationFile, arrayName, extraSpecs):
"""Set the VMAX V2 extra specs.
:param configurationFile: the EMC configuration file
:param arrayName: the array serial number
+ :param extraSpecs: extra specifications
:returns: dict -- the extraSpecs
:raises: VolumeBackendAPIException
"""
try:
- stripedMetaCount = self.extraSpecs[STRIPECOUNT]
- self.extraSpecs[MEMBERCOUNT] = stripedMetaCount
- self.extraSpecs[COMPOSITETYPE] = STRIPED
+ stripedMetaCount = extraSpecs[STRIPECOUNT]
+ extraSpecs[MEMBERCOUNT] = stripedMetaCount
+ extraSpecs[COMPOSITETYPE] = STRIPED
LOG.debug(
"There are: %(stripedMetaCount)s striped metas in "
{'stripedMetaCount': stripedMetaCount})
except KeyError:
memberCount = '1'
- self.extraSpecs[MEMBERCOUNT] = memberCount
- self.extraSpecs[COMPOSITETYPE] = CONCATENATED
+ extraSpecs[MEMBERCOUNT] = memberCount
+ extraSpecs[COMPOSITETYPE] = CONCATENATED
LOG.debug("StripedMetaCount is not in the extra specs.")
poolName = self.utils.parse_pool_name_from_file(configurationFile)
LOG.debug("The fast policy name is: %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
- self.extraSpecs[POOL] = poolName
- self.extraSpecs[ARRAY] = arrayName
- self.extraSpecs[FASTPOLICY] = fastPolicyName
- self.extraSpecs[ISV3] = False
- self.extraSpecs = self._get_job_extra_specs(configurationFile)
+ extraSpecs[POOL] = poolName
+ extraSpecs[ARRAY] = arrayName
+ extraSpecs[FASTPOLICY] = fastPolicyName
+ extraSpecs[ISV3] = False
+ extraSpecs = self._get_job_extra_specs(configurationFile, extraSpecs)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"FastPolicy is: %(fastPolicy)s "
"CompositeType is: %(compositeType)s "
"MemberCount is: %(memberCount)s.",
- {'pool': self.extraSpecs[POOL],
- 'array': self.extraSpecs[ARRAY],
- 'fastPolicy': self.extraSpecs[FASTPOLICY],
- 'compositeType': self.extraSpecs[COMPOSITETYPE],
- 'memberCount': self.extraSpecs[MEMBERCOUNT]})
- return self.extraSpecs
-
- def _set_v3_extra_specs(self, configurationFile, arrayName):
+ {'pool': extraSpecs[POOL],
+ 'array': extraSpecs[ARRAY],
+ 'fastPolicy': extraSpecs[FASTPOLICY],
+ 'compositeType': extraSpecs[COMPOSITETYPE],
+ 'memberCount': extraSpecs[MEMBERCOUNT]})
+ return extraSpecs
+
+ def _set_v3_extra_specs(self, configurationFile, arrayName, extraSpecs):
"""Set the VMAX V3 extra specs.
If SLO or workload are not specified then the default
:param arrayName: the array serial number
+ :param extraSpecs: extra specifications
:returns: dict -- the extraSpecs
"""
- self.extraSpecs[SLO] = self.utils.parse_slo_from_file(
+ extraSpecs[SLO] = self.utils.parse_slo_from_file(
configurationFile)
- self.extraSpecs[WORKLOAD] = self.utils.parse_workload_from_file(
+ extraSpecs[WORKLOAD] = self.utils.parse_workload_from_file(
configurationFile)
- self.extraSpecs[POOL] = self.utils.parse_pool_name_from_file(
+ extraSpecs[POOL] = self.utils.parse_pool_name_from_file(
configurationFile)
- self.extraSpecs[ARRAY] = arrayName
- self.extraSpecs[ISV3] = True
- self.extraSpecs = self._get_job_extra_specs(configurationFile)
+ extraSpecs[ARRAY] = arrayName
+ extraSpecs[ISV3] = True
+ extraSpecs = self._get_job_extra_specs(configurationFile, extraSpecs)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"SLO is: %(slo)s "
"Workload is: %(workload)s.",
- {'pool': self.extraSpecs[POOL],
- 'array': self.extraSpecs[ARRAY],
- 'slo': self.extraSpecs[SLO],
- 'workload': self.extraSpecs[WORKLOAD]})
- return self.extraSpecs
+ {'pool': extraSpecs[POOL],
+ 'array': extraSpecs[ARRAY],
+ 'slo': extraSpecs[SLO],
+ 'workload': extraSpecs[WORKLOAD]})
+ return extraSpecs
- def _get_job_extra_specs(self, configurationFile):
+ def _get_job_extra_specs(self, configurationFile, extraSpecs):
"""Get user defined extra specs around job intervals and retries.
:param configurationFile: the EMC configuration file
- :returns: dict -- extraSpecs
+ :param extraSpecs: extra specifications (updated in place)
+ :returns: dict -- the updated extraSpecs
"""
intervalInSecs = self.utils.parse_interval_from_file(
configurationFile)
if intervalInSecs is not None:
LOG.debug("The user defined interval is : %(intervalInSecs)s.",
{'intervalInSecs': intervalInSecs})
- self.extraSpecs[INTERVAL] = intervalInSecs
+ extraSpecs[INTERVAL] = intervalInSecs
retries = self.utils.parse_retries_from_file(
configurationFile)
if retries is not None:
LOG.debug("The user defined retries is : %(retries)s.",
{'retries': retries})
- self.extraSpecs[RETRIES] = retries
+ extraSpecs[RETRIES] = retries
- return self.extraSpecs
+ return extraSpecs
def _delete_from_pool(self, storageConfigService, volumeInstance,
- volumeName, deviceId, fastPolicyName):
+ volumeName, deviceId, fastPolicyName, extraSpecs):
"""Delete from pool (v2).
:param storageConfigService: the storage config service
:param volumeName: the volume Name
:param deviceId: the device ID of the volume
:param fastPolicyName: the FAST policy name(if it exists)
+ :param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
self.masking.remove_device_from_default_storage_group(
self.conn, controllerConfigurationService,
volumeInstance.path, volumeName, fastPolicyName,
- self.extraSpecs))
+ extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.warn(_LW(
"The volume: %(volumename)s. was not first part of the "
# Check if it is part of another storage group.
self._remove_device_from_storage_group(
controllerConfigurationService,
- volumeInstance.path, volumeName)
+ volumeInstance.path, volumeName, extraSpecs)
else:
# Check if volume is part of a storage group.
self._remove_device_from_storage_group(
controllerConfigurationService,
- volumeInstance.path, volumeName)
+ volumeInstance.path, volumeName, extraSpecs)
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigService: %(service)s TheElement: %(vol_instance)s "
try:
rc = self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
- volumeName, self.extraSpecs)
+ volumeName, extraSpecs)
except Exception as e:
# If we cannot successfully delete the volume then we want to
.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService,
volumeInstance, volumeName, fastPolicyName,
- self.extraSpecs))
+ extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to Roll back to re-add volume %(volumeName)s "
return rc
def _delete_from_pool_v3(self, storageConfigService, volumeInstance,
- volumeName, deviceId, storageGroupName):
+ volumeName, deviceId, storageGroupName,
+ extraSpecs):
"""Delete from pool (v3).
:param storageConfigService: the storage config service
:param volumeName: the volume Name
:param deviceId: the device ID of the volume
:param storageGroupName: the name of the default SG
+ :param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
# extra logic for case when volume is the last member.
sgFromVolInstanceName = self.masking.remove_and_reset_members(
self.conn, controllerConfigurationService, volumeInstance,
- volumeName, self.extraSpecs, None, 'noReset')
+ volumeName, extraSpecs, None, 'noReset')
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigServic: %(service)s TheElement: %(vol_instance)s "
try:
rc = self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
- volumeName, self.extraSpecs)
+ volumeName, extraSpecs)
except Exception as e:
# If we cannot successfully delete the volume, then we want to
self.masking.add_volume_to_storage_group(
self.conn, controllerConfigurationService,
storageGroupInstanceName, volumeInstance, volumeName,
- storageGroupName, self.extraSpecs)
+ storageGroupName, extraSpecs)
LOG.error(_LE("Exception: %s."), e)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
return rc
def _create_clone_v2(self, repServiceInstanceName, cloneVolume,
- sourceVolume, sourceInstance, isSnapshot):
+ sourceVolume, sourceInstance, isSnapshot,
+ extraSpecs):
"""Create a clone (v2).
:param repServiceInstanceName: the replication service
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: check to see if it is a snapshot
+ :param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
if metaHeadInstanceName is None: # Simple volume.
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, None, isSnapshot)
+ sourceInstance, None, extraSpecs, isSnapshot)
else: # Composite volume with meta device members.
# Check if the meta members capacity.
metaMemberInstanceNames = (
LOG.debug("Meta volume all of the same size.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, None, isSnapshot)
+ sourceInstance, None, extraSpecs, isSnapshot)
LOG.debug("Meta volumes are of different sizes, "
"%d different sizes.", len(set(volumeCapacities)))
volumeSizeInbits))}
rc, baseVolumeDict, storageSystemName = (
self._create_composite_volume(
- volume, baseVolumeName, volumeSizeInbits))
+ volume, baseVolumeName, volumeSizeInbits,
+ extraSpecs))
baseTargetVolumeInstance = self.utils.find_volume_instance(
self.conn, baseVolumeDict, baseVolumeName)
LOG.debug("Base target volume %(targetVol)s created. "
unboundVolumeInstance = (
self._create_and_get_unbound_volume(
self.conn, storageConfigService,
- baseTargetVolumeInstance.path, volumeSizeInbits))
+ baseTargetVolumeInstance.path, volumeSizeInbits,
+ extraSpecs))
if unboundVolumeInstance is None:
exceptionMessage = (_(
"Error Creating unbound volume."))
self.utils.find_element_composition_service(
self.conn, storageSystemName))
compositeType = self.utils.get_composite_type(
- self.extraSpecs[COMPOSITETYPE])
+ extraSpecs[COMPOSITETYPE])
rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn,
baseTargetVolumeInstance,
unboundVolumeInstance.path,
targetVolumeName,
- compositeType))
+ compositeType,
+ extraSpecs))
if modifiedVolumeDict is None:
exceptionMessage = (_(
"Error appending volume %(volumename)s to "
LOG.debug("Create V2 replica for meta members of different sizes.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, baseTargetVolumeInstance, isSnapshot)
+ sourceInstance, baseTargetVolumeInstance, extraSpecs,
+ isSnapshot)
def _create_v2_replica_and_delete_clone_relationship(
self, repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, targetInstance, isSnapshot=False):
+ sourceInstance, targetInstance, extraSpecs, isSnapshot=False):
"""Create a replica and delete the clone relationship.
:param repServiceInstanceName: the replication service
:param sourceVolume: the source volume object
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
+ :param extraSpecs: extra specifications
:param isSnapshot: check to see if it is a snapshot
:returns: int -- return code
:returns: dict -- cloneDict
cloneName = cloneVolume['name']
rc, job = self.provision.create_element_replica(
self.conn, repServiceInstanceName, cloneName, sourceName,
- sourceInstance, targetInstance, self.extraSpecs)
+ sourceInstance, targetInstance, extraSpecs)
cloneDict = self.provision.get_volume_dict_from_job(
self.conn, job['Job'])
-
- fastPolicyName = self.extraSpecs[FASTPOLICY]
+ fastPolicyName = extraSpecs[FASTPOLICY]
if isSnapshot:
if fastPolicyName is not None:
storageSystemName = sourceInstance['SystemName']
self._add_clone_to_default_storage_group(
- fastPolicyName, storageSystemName, cloneDict, cloneName)
+ fastPolicyName, storageSystemName, cloneDict, cloneName,
+ extraSpecs)
LOG.info(_LI("Snapshot creation %(cloneName)s completed. "
"Source Volume: %(sourceName)s."),
{'cloneName': cloneName,
# 8 - Detach operation.
rc, job = self.provision.delete_clone_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
- self.extraSpecs)
+ extraSpecs)
if fastPolicyName is not None:
self._add_clone_to_default_storage_group(
- fastPolicyName, storageSystemName, cloneDict, cloneName)
+ fastPolicyName, storageSystemName, cloneDict, cloneName,
+ extraSpecs)
return rc, cloneDict
def _create_replica_v3(
self, repServiceInstanceName, cloneVolume,
- sourceVolume, sourceInstance, isSnapshot):
+ sourceVolume, sourceInstance, isSnapshot, extraSpecs):
"""Create a replica.
V3 specific function, create replica for source volume,
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: boolean -- check to see if it is a snapshot
+ :param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- cloneDict
"""
syncType = self.utils.get_num(8, '16') # Default syncType 8: clone.
# Create target volume
- self.extraSpecs = self._initial_setup(cloneVolume)
+ extraSpecs = self._initial_setup(cloneVolume)
numOfBlocks = sourceInstance['NumberOfBlocks']
blockSize = sourceInstance['BlockSize']
int(self.utils.convert_bits_to_gbs(volumeSizeInbits))}
_, volumeDict, storageSystemName = (
self._create_v3_volume(
- volume, cloneName, volumeSizeInbits))
+ volume, cloneName, volumeSizeInbits, extraSpecs))
targetInstance = self.utils.find_volume_instance(
self.conn, volumeDict, cloneName)
LOG.debug("Create replica target volume "
_, job = (
self.provisionv3.create_element_replica(
self.conn, repServiceInstanceName, cloneName, syncType,
- sourceInstance, self.extraSpecs, targetInstance))
+ sourceInstance, extraSpecs, targetInstance))
cloneDict = self.provisionv3.get_volume_dict_from_job(
self.conn, job['Job'])
rc, job = self.provisionv3.break_replication_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
- operation, self.extraSpecs)
+ operation, extraSpecs)
return rc, cloneDict
def _delete_cg_and_members(
- self, storageSystem, cgName, modelUpdate, volumes):
+ self, storageSystem, cgName, modelUpdate, volumes, extraSpecs):
"""Helper function to delete a consistencygroup and its member volumes.
:param storageSystem: storage system
:param cgName: consistency group name
:param modelUpdate: dict -- the model update dict
:param volumes: the list of member volumes
+ :param extraSpecs: extra specifications
:returns: dict -- modelUpdate
:returns: list -- the updated list of member volumes
:raises: VolumeBackendAPIException
self.conn, storageSystem))
cgInstanceName = self._find_consistency_group(
replicationService, cgName)
+
if cgInstanceName is None:
exception_message = (_("Cannot find CG group %s.") % cgName)
raise exception.VolumeBackendAPIException(
data=exception_message)
+
memberInstanceNames = self._get_members_of_replication_group(
cgInstanceName)
self.provision.delete_consistency_group(
self.conn, replicationService, cgInstanceName, cgName,
- self.extraSpecs)
+ extraSpecs)
if memberInstanceNames:
try:
for memberInstanceName in memberInstanceNames:
self._remove_device_from_storage_group(
controllerConfigurationService,
- memberInstanceName, 'Member Volume')
+ memberInstanceName, 'Member Volume', extraSpecs)
LOG.debug("Deleting CG members. CG: %(cg)s "
"%(numVols)lu member volumes: %(memVols)s.",
{'cg': cgInstanceName,
'numVols': len(memberInstanceNames),
'memVols': memberInstanceNames})
- if self.extraSpecs[ISV3]:
+ if extraSpecs[ISV3]:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None, self.extraSpecs)
+ memberInstanceNames, None, extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None, self.extraSpecs)
+ memberInstanceNames, None, extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception: