doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
+ doc = self.add_array_info(doc, emc)
+ filename = 'cinder_emc_config_ISCSINoFAST.xml'
+ self.config_file_path = self.tempdir + '/' + filename
+
+ f = open(self.config_file_path, 'w')
+ doc.writexml(f)
+ f.close()
+
+ def create_fake_config_file_no_fast_with_add_ons(self):
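+ """Create a fake config file (no FAST) that adds Interval/Retries tags."""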
+
+ doc = Document()
+ emc = doc.createElement("EMC")
+ doc.appendChild(emc)
+ doc = self.add_array_info(doc, emc)
+ doc = self.add_interval_and_retries(doc, emc)
+ filename = 'cinder_emc_config_ISCSINoFAST.xml'
+ self.config_file_path = self.tempdir + '/' + filename
+
+ f = open(self.config_file_path, 'w')
+ doc.writexml(f)
+ f.close()
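+
+ # For orientation, a rough sketch (not verbatim writexml output) of the
+ # XML the helpers below emit into the fake config file:
+ #   <EMC><Array>1234567891011</Array><Timeout>0</Timeout>
+ #        <Interval>5</Interval><Retries>40</Retries></EMC>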
+ def add_array_info(self, doc, emc):
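+ """Add the Array and Timeout elements to the EMC doc."""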
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
+ return doc
- filename = 'cinder_emc_config_ISCSINoFAST.xml'
+ def add_interval_and_retries(self, doc, emc):
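+ """Add the Interval and Retries elements to the EMC doc."""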
+ interval = doc.createElement("Interval")
+ intervaltext = doc.createTextNode("5")
+ emc.appendChild(interval)
+ interval.appendChild(intervaltext)
- self.config_file_path = self.tempdir + '/' + filename
-
- f = open(self.config_file_path, 'w')
- doc.writexml(f)
- f.close()
+ retries = doc.createElement("Retries")
+ retriestext = doc.createTextNode("40")
+ emc.appendChild(retries)
+ retries.appendChild(retriestext)
+ return doc
# fix for https://bugs.launchpad.net/cinder/+bug/1364232
def create_fake_config_file_1364232(self):
# Tests removal of last volume in a storage group V2
def test_remove_and_reset_members(self):
- fastPolicyName = 'gold'
- isV3 = False
+ extraSpecs = {'volume_backend_name': 'GOLD_BE',
+ 'isV3': False}
conn = self.fake_ecom_connection()
controllerConfigService = (
self.driver.utils.find_controller_configuration_service(
volumeInstanceName = (
conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
volumeInstance = conn.GetInstance(volumeInstanceName)
- volumeName = "last-Vol"
+ volumeName = "1416035-Vol"
self.driver.common.masking.get_devices_from_storage_group = mock.Mock(
return_value=['one_value'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
self.driver.common.masking.remove_and_reset_members(
conn, controllerConfigService, volumeInstance,
- fastPolicyName, volumeName, isV3)
+ volumeName, extraSpecs)
# Bug 1393555 - masking view has been deleted by another process.
def test_find_maskingview(self):
if bExists:
os.remove(self.config_file_1364232)
+ @mock.patch.object(
+ emc_vmax_common.EMCVMAXCommon,
+ '_get_pool_and_storage_system',
+ return_value=(None, EMCVMAXCommonData.storage_system))
+ @mock.patch.object(
+ volume_types,
+ 'get_volume_type_extra_specs',
+ return_value={'volume_backend_name': 'ISCSINoFAST'})
+ def test_intervals_and_retries(
+ self, _mock_volume_type, mock_storage_system):
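+ """Check that Interval/Retries from the config file reach extraSpecs."""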
+ save_config_path = self.config_file_path
+ self.create_fake_config_file_no_fast_with_add_ons()
+ self.driver.create_volume(self.data.test_volume_v2)
+ extraSpecs = self.driver.common.extraSpecs
+ self.assertEqual(40,
+ self.driver.utils._get_max_job_retries(extraSpecs))
+ self.assertEqual(5,
+ self.driver.utils._get_interval_in_secs(extraSpecs))
+
+ bExists = os.path.exists(self.config_file_path)
+ if bExists:
+ os.remove(self.config_file_path)
+
+ self.config_file_path = save_config_path
+
@mock.patch.object(
emc_vmax_utils.EMCVMAXUtils,
'find_storageSystem',
provisionv3.create_group_replica.assert_called_once_with(
self.conn, repServ,
(None, EMCVMAXCommonData.test_CG),
- (None, EMCVMAXCommonData.test_CG), '12de')
+ (None, EMCVMAXCommonData.test_CG), '12de',
+ EMCVMAXCommonData.extra_specs)
@mock.patch.object(
volume_types,
# V3
SLO = 'storagetype:slo'
WORKLOAD = 'storagetype:workload'
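+# User-tunable job polling knobs, parsed from the <Interval> and
+# <Retries> tags of the EMC configuration file.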
+INTERVAL = 'storagetype:interval'
+RETRIES = 'storagetype:retries'
ISV3 = 'isV3'
emc_opts = [
self.url = None
self.user = None
self.passwd = None
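+ # Per-operation extra specs, refreshed by _initial_setup().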
+ self.extraSpecs = {}
self.masking = emc_vmax_masking.EMCVMAXMasking(prtcl)
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
"""
volumeSize = int(self.utils.convert_gb_to_bits(volume['size']))
volumeName = volume['id']
- extraSpecs = self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
rc, volumeDict, storageSystemName = (
- self._create_v3_volume(volume, extraSpecs,
- volumeName, volumeSize))
+ self._create_v3_volume(volume, volumeName, volumeSize))
else:
rc, volumeDict, storageSystemName = (
- self._create_composite_volume(volume, extraSpecs,
- volumeName, volumeSize))
+ self._create_composite_volume(volume, volumeName, volumeSize))
# If volume is created as part of a consistency group.
if 'consistencygroup_id' in volume and volume['consistencygroup_id']:
cgInstanceName,
volumeInstance.path,
cgName,
- volumeName)
+ volumeName,
+ self.extraSpecs)
LOG.info(_LI("Leaving create_volume: %(volumeName)s "
"Return code: %(rc)lu "
:returns: cloneVolumeDict - the cloned volume dictionary
"""
LOG.debug("Entering create_volume_from_snapshot.")
- self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
snapshotInstance = self._find_lun(snapshot)
storageSystem = snapshotInstance['SystemName']
data=exception_message)
self.provision.delete_clone_relationship(
- self.conn, repservice, syncName)
+ self.conn, repservice, syncName, self.extraSpecs)
- return self._create_cloned_volume(volume, snapshot)
+ return self._create_cloned_volume(volume, snapshot, False)
def create_cloned_volume(self, cloneVolume, sourceVolume):
"""Creates a clone of the specified volume.
:param sourceVolume - volume object
:returns: cloneVolumeDict - the cloned volume dictionary
"""
- return self._create_cloned_volume(cloneVolume, sourceVolume)
+ return self._create_cloned_volume(cloneVolume, sourceVolume, False)
def delete_volume(self, volume):
"""Deletes a EMC(VMAX) volume.
self._delete_snapshot(snapshot)
def _remove_members(self, controllerConfigService,
- volumeInstance, extraSpecs, connector):
+ volumeInstance, connector):
"""This method unmaps a volume from a host.
Removes volume from the Device Masking Group that belongs to
"""
volumeName = volumeInstance['ElementName']
LOG.debug("Detaching volume %s.", volumeName)
- fastPolicyName = extraSpecs.get(FASTPOLICY, None)
- isV3 = extraSpecs[ISV3]
return self.masking.remove_and_reset_members(
self.conn, controllerConfigService, volumeInstance,
- fastPolicyName, volumeName, isV3, connector)
+ volumeName, self.extraSpecs, connector)
def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host.
:param connector: the connector Object
:raises: VolumeBackendAPIException
"""
- extraSpecs = self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
volumename = volume['name']
LOG.info(_LI("Unmap volume: %(volume)s."),
{'volume': volumename})
% {'storage_system': storage_system})
raise exception.VolumeBackendAPIException(data=exception_message)
- self._remove_members(configservice, vol_instance,
- extraSpecs, connector)
+ self._remove_members(configservice, vol_instance, connector)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns device and connection info.
:returns: deviceInfoDict, device information tuple
:raises: VolumeBackendAPIException
"""
- extraSpecs = self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
volumeName = volume['name']
LOG.info(_LI("Initialize connection: %(volume)s."),
'deviceNumber': deviceNumber})
else:
deviceInfoDict = self._attach_volume(
- volume, connector, extraSpecs, True)
+ volume, connector, True)
else:
- deviceInfoDict = self._attach_volume(volume, connector, extraSpecs)
+ deviceInfoDict = self._attach_volume(volume, connector)
return deviceInfoDict
- def _attach_volume(self, volume, connector, extraSpecs,
- isLiveMigration=None):
+ def _attach_volume(self, volume, connector, isLiveMigration=None):
"""Attach a volume to a host.
If live migration is being undertaken then the volume
"""
volumeName = volume['name']
maskingViewDict = self._populate_masking_dict(
- volume, connector, extraSpecs)
+ volume, connector)
if isLiveMigration:
maskingViewDict['isLiveMigration'] = True
else:
maskingViewDict['isLiveMigration'] = False
rollbackDict = self.masking.get_or_create_masking_view_and_map_lun(
- self.conn, maskingViewDict)
+ self.conn, maskingViewDict, self.extraSpecs)
# Find host lun id again after the volume is exported to the host.
deviceInfoDict = self.find_device_number(volume)
:param volume: the volume Object
:param connector: the connector Object
"""
- self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
volumename = volume['name']
LOG.info(_LI("Terminate connection: %(volume)s."),
"""
originalVolumeSize = volume['size']
volumeName = volume['name']
- self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.info(_LI("Migrating using retype Volume: %(volume)s."),
{'volume': volumeName})
- extraSpecs = self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
{'name': volumeName})
return False
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
return self._slo_workload_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
- extraSpecs, new_type)
+ new_type)
else:
return self._pool_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
- extraSpecs[FASTPOLICY], new_type)
+ self.extraSpecs[FASTPOLICY], new_type)
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Migrate volume to another host.
try:
self.provision.migrate_volume_to_storage_pool(
conn, storageRelocationService, volumeInstance.path,
- sourcePoolInstanceName)
+ sourcePoolInstanceName, self.extraSpecs)
except Exception:
LOG.error(_LE(
"Failed to return volume %(volumeName)s to "
defaultStorageGroupInstanceName)):
self.provision.remove_device_from_storage_group(
conn, controllerConfigurationService,
- assocStorageGroupInstanceName, volumeInstance.path, volumeName)
+ assocStorageGroupInstanceName,
+ volumeInstance.path, volumeName, self.extraSpecs)
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigurationService,
- targetFastPolicyName, volumeInstance))
+ targetFastPolicyName, volumeInstance, self.extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Unable to create or get default storage group for FAST policy"
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
- volumeName, targetFastPolicyName))
+ volumeName, targetFastPolicyName, self.extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Failed to verify that volume was added to storage group for "
try:
rc = self.provision.migrate_volume_to_storage_pool(
self.conn, storageRelocationService, volumeInstance.path,
- targetPoolInstanceName)
+ targetPoolInstanceName, self.extraSpecs)
except Exception as e:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
LOG.debug("Terminating migration session on: %(volumeName)s.",
{'volumeName': volumeName})
self.provision._terminate_migrate_session(
- self.conn, volumeInstance.path)
+ self.conn, volumeInstance.path, self.extraSpecs)
if rc == 0:
moved = True
defaultStorageGroupInstanceName = (
self.masking.remove_device_from_default_storage_group(
conn, controllerConfigurationService,
- volumeInstance.path, volumeName, sourceFastPolicyName))
+ volumeInstance.path, volumeName, sourceFastPolicyName,
+ self.extraSpecs))
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
exceptionMessage = (_(
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigurationService, volumeInstance,
- volumeName, targetFastPolicyName))
+ volumeName, targetFastPolicyName, self.extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to add %(volumeName)s "
:returns: extraSpecs (out)
"""
try:
- extraSpecs, configurationFile = (
+ self.extraSpecs, configurationFile = (
self._set_config_file_and_get_extra_specs(
volume, volumeTypeId))
isV3 = self.utils.isArrayV3(self.conn, arrayName)
if isV3:
- extraSpecs = self._set_v3_extra_specs(
- extraSpecs, configurationFile, arrayName)
+ self.extraSpecs = self._set_v3_extra_specs(
+ configurationFile, arrayName)
else:
# V2 extra specs.
- extraSpecs = self._set_v2_extra_specs(
- extraSpecs, configurationFile, arrayName)
+ self.extraSpecs = self._set_v2_extra_specs(
+ configurationFile, arrayName)
except Exception:
exceptionMessage = (_(
"Unable to get configuration information necessary to create "
"/etc/cinder/cinder_emc_config_<CONFIG_GROUP>.xml."))
raise exception.VolumeBackendAPIException(data=exceptionMessage)
- return extraSpecs
+ return self.extraSpecs
- def _get_pool_and_storage_system(self, extraSpecs):
+ def _get_pool_and_storage_system(self):
"""Given the extra specs get the pool and storage system name.
- :params extraSpecs: the extra spec tuple
:returns: poolInstanceName The pool instance name
:returns: String the storage system name
"""
try:
- array = extraSpecs[ARRAY]
+ array = self.extraSpecs[ARRAY]
poolInstanceName, storageSystemStr = self._find_pool_in_array(
- array, extraSpecs[POOL], extraSpecs[ISV3])
+ array, self.extraSpecs[POOL], self.extraSpecs[ISV3])
except Exception:
exceptionMessage = (_(
"You must supply an array in your EMC configuration file."))
return poolInstanceName, storageSystemStr
- def _populate_masking_dict(self, volume, connector, extraSpecs):
+ def _populate_masking_dict(self, volume, connector):
"""Get all the names of the maskingView and subComponents.
:param volume: the volume object
"""
maskingViewDict = {}
hostName = connector['host']
- poolName = extraSpecs[POOL]
- isV3 = extraSpecs[ISV3]
+ poolName = self.extraSpecs[POOL]
+ isV3 = self.extraSpecs[ISV3]
maskingViewDict['isV3'] = isV3
protocol = self.utils.get_short_protocol_type(self.protocol)
shortHostName = self.utils.get_host_short_name(hostName)
if isV3:
- slo = extraSpecs[SLO]
- workload = extraSpecs[WORKLOAD]
+ slo = self.extraSpecs[SLO]
+ workload = self.extraSpecs[WORKLOAD]
maskingViewDict['slo'] = slo
maskingViewDict['workload'] = workload
maskingViewDict['pool'] = poolName
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
- volumeName, fastPolicyName)
+ volumeName, fastPolicyName, self.extraSpecs)
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path))
LOG.error(errorMessage)
self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
- volumeName)
+ volumeName, self.extraSpecs)
raise exception.VolumeBackendAPIException(data=errorMessage)
def _create_and_get_unbound_volume(
volumeDict, _ = (
self.provision.create_volume_from_pool(
self.conn, storageConfigService, volumeName, poolInstanceName,
- volumeSize))
+ volumeSize, self.extraSpecs))
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
return volumeInstance
self.provision.unbind_volume_from_storage_pool(
conn, storageConfigService, poolInstanceName,
volumeInstanceName,
- volumeName))
+ volumeName, self.extraSpecs))
volumeDict = self.provision.get_volume_dict_from_job(conn, job['Job'])
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
if 'True' in isComposite:
rc, job = self.provision.modify_composite_volume(
conn, elementCompositionServiceInstanceName,
- volumeInstance.path, appendVolumeInstanceName)
+ volumeInstance.path, appendVolumeInstanceName, self.extraSpecs)
elif 'False' in isComposite:
rc, job = self.provision.create_new_composite_volume(
conn, elementCompositionServiceInstanceName,
- volumeInstance.path, appendVolumeInstanceName, compositeType)
+ volumeInstance.path, appendVolumeInstanceName, compositeType,
+ self.extraSpecs)
else:
LOG.error(_LE(
"Unable to determine whether %(volumeName)s is "
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigService, fastPolicyName,
- volumeInstance))
+ volumeInstance, self.extraSpecs))
return defaultStorageGroupInstanceName
def _create_cloned_volume(
:param sourceVolume: source of the clone volume
:returns: cloneDict the cloned volume dictionary
"""
- extraSpecs = self._initial_setup(cloneVolume)
+ self.extraSpecs = self._initial_setup(cloneVolume)
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
'elementname': cloneName,
'sourceelement': sourceInstance.path})
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
rc, cloneDict = self._create_replica_v3(repServiceInstanceName,
cloneVolume,
sourceVolume,
cloneVolume,
sourceVolume,
sourceInstance,
- isSnapshot,
- extraSpecs)
+ isSnapshot)
LOG.debug("Leaving _create_cloned_volume: Volume: "
"%(cloneName)s Source Volume: %(sourceName)s "
"Return code: %(rc)lu.",
rc = -1
errorRet = (rc, volumeName)
- extraSpecs = self._initial_setup(volume)
+ self.extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
deviceId = volumeInstance['DeviceID']
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
storageGroupName = self.utils.get_v3_storage_group_name(
- extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD])
+ self.extraSpecs[POOL], self.extraSpecs[SLO],
+ self.extraSpecs[WORKLOAD])
rc = self._delete_from_pool_v3(
storageConfigService, volumeInstance, volumeName,
deviceId, storageGroupName)
else:
rc = self._delete_from_pool(storageConfigService, volumeInstance,
volumeName, deviceId,
- extraSpecs[FASTPOLICY])
+ self.extraSpecs[FASTPOLICY])
return (rc, volumeName)
def _remove_device_from_storage_group(
self.provision.remove_device_from_storage_group(
self.conn, controllerConfigurationService,
storageGroupInstanceName,
- volumeInstanceName, volumeName)
+ volumeInstanceName, volumeName, self.extraSpecs)
def _find_lunmasking_scsi_protocol_controller(self, storageSystemName,
connector):
LOG.info(_LI("Delete Snapshot: %(snapshot)s."),
{'snapshot': snapshotname})
- extraSpecs = self._initial_setup(snapshot)
+ self.extraSpecs = self._initial_setup(snapshot)
self.conn = self._get_ecom_connection()
- if not extraSpecs[ISV3]:
+ if not self.extraSpecs[ISV3]:
snapshotInstance = self._find_lun(snapshot)
storageSystem = snapshotInstance['SystemName']
'syncName': str(syncName)})
self.provision.delete_clone_relationship(
- self.conn, repservice, syncName, True)
+ self.conn, repservice, syncName, self.extraSpecs, True)
# Delete the target device.
self._delete_volume(snapshot)
cgName = self.utils.truncate_string(group['id'], 8)
- extraSpecs = self._initial_setup(None, volumeTypeId)
+ self.extraSpecs = self._initial_setup(None, volumeTypeId)
_, storageSystem = (
- self._get_pool_and_storage_system(extraSpecs))
+ self._get_pool_and_storage_system())
self.conn = self._get_ecom_connection()
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
self.provision.create_consistency_group(
- self.conn, replicationService, cgName)
+ self.conn, replicationService, cgName, self.extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to create consistency group:"
modelUpdate['status'] = group['status']
volumeTypeId = group['volume_type_id'].replace(",", "")
- extraSpecs = self._initial_setup(None, volumeTypeId)
+ self.extraSpecs = self._initial_setup(None, volumeTypeId)
_, storageSystem = (
- self._get_pool_and_storage_system(extraSpecs))
+ self._get_pool_and_storage_system())
try:
replicationService = self.utils.find_replication_service(
self.provision.delete_consistency_group(self.conn,
replicationService,
- cgInstanceName, cgName)
+ cgInstanceName, cgName,
+ self.extraSpecs)
# Do a bulk delete, a lot faster than single deletes.
if memberInstanceNames:
volumes, modelUpdate = self._do_bulk_delete(
storageSystem, memberInstanceNames, storageConfigservice,
- volumes, modelUpdate, extraSpecs[ISV3])
+ volumes, modelUpdate, self.extraSpecs[ISV3])
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
if isV3:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None)
+ memberInstanceNames, None, self.extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None)
+ memberInstanceNames, None, self.extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception:
modelUpdate = {'status': 'available'}
volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
- extraSpecs = self._initial_setup(None, volumeTypeId)
+ self.extraSpecs = self._initial_setup(None, volumeTypeId)
self.conn = self._get_ecom_connection()
_, storageSystem = (
- self._get_pool_and_storage_system(extraSpecs))
+ self._get_pool_and_storage_system())
try:
replicationService = self.utils.find_replication_service(
# Create the target consistency group.
targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
self.provision.create_consistency_group(
- self.conn, replicationService, targetCgName)
+ self.conn, replicationService, targetCgName, self.extraSpecs)
targetCgInstanceName = self._find_consistency_group(
replicationService, targetCgName)
LOG.info(_LI("Create target consistency group %(targetCg)s."),
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
_, volumeDict, _ = (
self._create_v3_volume(
- volume, extraSpecs,
- targetVolumeName, volumeSizeInbits))
+ volume, targetVolumeName, volumeSizeInbits))
else:
_, volumeDict, _ = (
self._create_composite_volume(
- volume, extraSpecs,
- targetVolumeName, volumeSizeInbits))
+ volume, targetVolumeName, volumeSizeInbits))
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetVolumeName)
LOG.debug("Create target volume for member volume "
targetCgInstanceName,
targetVolumeInstance.path,
targetCgName,
- targetVolumeName)
+ targetVolumeName,
+ self.extraSpecs)
# Relationship name is capped at 5 characters.
relationName = self.utils.truncate_string(cgsnapshot['id'], 5)
- if extraSpecs[ISV3]:
- self.provisionv3.create_group_replica(
- self.conn, replicationService, cgInstanceName,
- targetCgInstanceName, relationName)
+ if self.extraSpecs[ISV3]:
+ self.provisionv3.create_group_replica(
+ self.conn, replicationService, cgInstanceName,
+ targetCgInstanceName, relationName, self.extraSpecs)
else:
- self.provision.create_group_replica(
- self.conn, replicationService, cgInstanceName,
- targetCgInstanceName, relationName)
+ self.provision.create_group_replica(
+ self.conn, replicationService, cgInstanceName,
+ targetCgInstanceName, relationName, self.extraSpecs)
# Break the replica group relationship.
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
self.conn, storageSystem, targetCgInstanceName, True)
storageSystem)
raise exception.VolumeBackendAPIException(
data=exception_message)
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
# Operation 7: dissolve for snapVx.
operation = self.utils.get_num(9, '16')
self.provisionv3.break_replication_relationship(
- self.conn, repservice, rgSyncInstanceName, operation)
+ self.conn, repservice, rgSyncInstanceName, operation,
+ self.extraSpecs)
else:
self.provision.delete_clone_relationship(self.conn, repservice,
- rgSyncInstanceName)
+ rgSyncInstanceName,
+ self.extraSpecs)
except Exception as ex:
modelUpdate['status'] = 'error'
modelUpdate = {'status': 'deleted'}
volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
- extraSpecs = self._initial_setup(None, volumeTypeId)
+ self.extraSpecs = self._initial_setup(None, volumeTypeId)
self.conn = self._get_ecom_connection()
_, storageSystem = (
- self._get_pool_and_storage_system(extraSpecs))
+ self._get_pool_and_storage_system())
try:
targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
modelUpdate, snapshots = self._delete_cg_and_members(
- storageSystem, extraSpecs, targetCgName, modelUpdate,
+ storageSystem, targetCgName, modelUpdate,
snapshots)
except Exception as ex:
modelUpdate['status'] = 'error_deleting'
return memberInstanceNames
def _create_composite_volume(
- self, volume, extraSpecs, volumeName, volumeSize):
+ self, volume, volumeName, volumeSize):
"""Create a composite volume (V2).
:param volume: the volume object
:returns:
"""
memberCount, errorDesc = self.utils.determine_member_count(
- volume['size'], extraSpecs[MEMBERCOUNT], extraSpecs[COMPOSITETYPE])
+ volume['size'], self.extraSpecs[MEMBERCOUNT],
+ self.extraSpecs[COMPOSITETYPE])
if errorDesc is not None:
exceptionMessage = (_("The striped meta count of %(memberCount)s "
"is too small for volume: %(volumeName)s "
raise exception.VolumeBackendAPIException(data=exceptionMessage)
poolInstanceName, storageSystemName = (
- self._get_pool_and_storage_system(extraSpecs))
+ self._get_pool_and_storage_system())
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
# If FAST is intended to be used we must first check that the pool
# is associated with the correct storage tier.
- if extraSpecs[FASTPOLICY] is not None:
+ if self.extraSpecs[FASTPOLICY] is not None:
foundPoolInstanceName = self.fast.get_pool_associated_to_policy(
- self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
+ self.conn, self.extraSpecs[FASTPOLICY], self.extraSpecs[ARRAY],
storageConfigService, poolInstanceName)
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool: %(poolName)s. "
"is not associated to storage tier for "
"fast policy %(fastPolicy)s.")
- % {'poolName': extraSpecs[POOL],
- 'fastPolicy': extraSpecs[FASTPOLICY]})
+ % {'poolName': self.extraSpecs[POOL],
+ 'fastPolicy':
+ self.extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
compositeType = self.utils.get_composite_type(
- extraSpecs[COMPOSITETYPE])
+ self.extraSpecs[COMPOSITETYPE])
volumeDict, rc = self.provision.create_composite_volume(
self.conn, elementCompositionService, volumeSize, volumeName,
- poolInstanceName, compositeType, memberCount)
+ poolInstanceName, compositeType, memberCount, self.extraSpecs)
# Now that we have already checked that the pool is associated with
# the correct storage tier and the volume was successfully created
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy.
- if extraSpecs[FASTPOLICY]:
+ if self.extraSpecs[FASTPOLICY]:
LOG.info(_LI(
"Adding volume: %(volumeName)s to default storage group"
" for FAST policy: %(fastPolicyName)s."),
{'volumeName': volumeName,
- 'fastPolicyName': extraSpecs[FASTPOLICY]})
+ 'fastPolicyName': self.extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, volumeDict,
- volumeName, extraSpecs[FASTPOLICY]))
+ volumeName, self.extraSpecs[FASTPOLICY]))
if not defaultStorageGroupInstanceName:
exceptionMessage = (_(
"Unable to create or get default storage group for "
"FAST policy: %(fastPolicyName)s.")
- % {'fastPolicyName': extraSpecs[FASTPOLICY]})
+ % {'fastPolicyName': self.extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._add_volume_to_default_storage_group_on_create(
volumeDict, volumeName, storageConfigService,
- storageSystemName, extraSpecs[FASTPOLICY])
+ storageSystemName, self.extraSpecs[FASTPOLICY])
return rc, volumeDict, storageSystemName
def _create_v3_volume(
- self, volume, extraSpecs, volumeName, volumeSize):
- """create a volume (V3).
+ self, volume, volumeName, volumeSize):
+ """Create a volume (V3).
:param volume: the volume object
- :param extraSpecs:
- :param volumeName:
- :param volumeSize:
+ :param volumeName: the volume name
+ :param volumeSize: the volume size
:returns:
"""
isValidSLO, isValidWorkload = self.utils.verify_slo_workload(
- extraSpecs[SLO], extraSpecs[WORKLOAD])
+ self.extraSpecs[SLO], self.extraSpecs[WORKLOAD])
if not isValidSLO or not isValidWorkload:
exceptionMessage = (_(
"Either SLO: %(slo)s or workload %(workload)s is invalid. "
"Examine previous error statement for valid values.")
- % {'slo': extraSpecs[SLO],
- 'workload': extraSpecs[WORKLOAD]})
+ % {'slo': self.extraSpecs[SLO],
+ 'workload': self.extraSpecs[WORKLOAD]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
poolInstanceName, storageSystemName = (
- self._get_pool_and_storage_system(extraSpecs))
+ self._get_pool_and_storage_system())
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
maximumVolumeSize, minimumVolumeSize = (
self.provisionv3.get_volume_range(
self.conn, storageConfigService, poolInstanceName,
- extraSpecs[SLO], extraSpecs[WORKLOAD]))
+ self.extraSpecs[SLO], self.extraSpecs[WORKLOAD],
+ self.extraSpecs))
if not self.utils.is_in_range(
volumeSize, maximumVolumeSize, minimumVolumeSize):
LOG.warn(_LW(
'volumeSize': volumeSize,
'minimumVolumeSize': minimumVolumeSize,
'maximumVolumeSize': maximumVolumeSize,
- 'slo': extraSpecs[SLO],
- 'workload': extraSpecs[WORKLOAD]
+ 'slo': self.extraSpecs[SLO],
+ 'workload': self.extraSpecs[WORKLOAD]
})
# A volume created without specifying a storage group during
# creation time is allocated from the default SRP pool and
# assigned the optimized SLO.
sgInstanceName = self._get_or_create_storage_group_v3(
- extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
- storageSystemName)
+ self.extraSpecs[POOL], self.extraSpecs[SLO],
+ self.extraSpecs[WORKLOAD], storageSystemName)
volumeDict, rc = self.provisionv3.create_volume_from_sg(
self.conn, storageConfigService, volumeName,
- sgInstanceName, volumeSize)
+ sgInstanceName, volumeSize, self.extraSpecs)
return rc, volumeDict, storageSystemName
if sgInstanceName is None:
sgInstanceName = self.provisionv3.create_storage_group_v3(
self.conn, controllerConfigService, storageGroupName,
- poolName, slo, workload)
+ poolName, slo, workload, self.extraSpecs)
return sgInstanceName
return rc, modifiedVolumeDict
def _slo_workload_migration(self, volumeInstance, volume, host,
- volumeName, volumeStatus,
- extraSpecs, newType):
+ volumeName, volumeStatus, newType):
"""Migrate from SLO/Workload combination to another (V3).
:param volumeInstance: the volume instance
:param host: the host object
:param volumeName: the name of the volume
:param volumeStatus: the volume status
- :param extraSpecs: the extra specs dict
:param newType:
:returns: boolean
"""
volumeInstanceName = volumeInstance.path
isValid, targetSlo, targetWorkload = (
self._is_valid_for_storage_assisted_migration_v3(
- volumeInstanceName, host, extraSpecs[ARRAY],
- extraSpecs[POOL], volumeName, volumeStatus))
+ volumeInstanceName, host, self.extraSpecs[ARRAY],
+ self.extraSpecs[POOL], volumeName, volumeStatus))
storageSystemName = volumeInstance['SystemName']
'sourceHost': volume['host'],
'targetHost': host['host']})
return self._migrate_volume_v3(
- volume, volumeInstance, extraSpecs[POOL], targetSlo,
+ volume, volumeInstance, self.extraSpecs[POOL], targetSlo,
targetWorkload, storageSystemName, newType)
return False
controllerConfigService,
foundStorageGroupInstanceName,
volumeInstance.path,
- volumeName)
+ volumeName, self.extraSpecs)
# Check that it has been removed.
sgFromVolRemovedInstanceName = (
self.utils.wrap_get_storage_group_from_volume(
self.masking.add_volume_to_storage_group(
self.conn, controllerConfigService, targetSgInstanceName,
- volumeInstance, volumeName, storageGroupName)
+ volumeInstance, volumeName, storageGroupName, self.extraSpecs)
# Check that it has been added.
sgFromVolAddedInstanceName = (
self.utils.get_storage_group_from_volume(
return location_info, total_capacity_gb, free_capacity_gb
- def _set_v2_extra_specs(self, extraSpecs, configurationFile, arrayName):
+ def _set_v2_extra_specs(self, configurationFile, arrayName):
"""Set the VMAX V2 extra specs.
- :param extraSpecs: the extraSpecs (input)
:param configurationFile: the EMC configuration file
:param arrayName: the array serial number
:returns: extraSpecs (out)
"""
try:
- stripedMetaCount = extraSpecs[STRIPECOUNT]
- extraSpecs[MEMBERCOUNT] = stripedMetaCount
- extraSpecs[COMPOSITETYPE] = STRIPED
+ stripedMetaCount = self.extraSpecs[STRIPECOUNT]
+ self.extraSpecs[MEMBERCOUNT] = stripedMetaCount
+ self.extraSpecs[COMPOSITETYPE] = STRIPED
LOG.debug(
"There are: %(stripedMetaCount)s striped metas in "
{'stripedMetaCount': stripedMetaCount})
except KeyError:
memberCount = '1'
- extraSpecs[MEMBERCOUNT] = memberCount
- extraSpecs[COMPOSITETYPE] = CONCATENATED
+ self.extraSpecs[MEMBERCOUNT] = memberCount
+ self.extraSpecs[COMPOSITETYPE] = CONCATENATED
LOG.debug("StripedMetaCount is not in the extra specs.")
poolName = self.utils.parse_pool_name_from_file(configurationFile)
LOG.debug("The fast policy name is: %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
- extraSpecs[POOL] = poolName
- extraSpecs[ARRAY] = arrayName
- extraSpecs[FASTPOLICY] = fastPolicyName
- extraSpecs[ISV3] = False
+ self.extraSpecs[POOL] = poolName
+ self.extraSpecs[ARRAY] = arrayName
+ self.extraSpecs[FASTPOLICY] = fastPolicyName
+ self.extraSpecs[ISV3] = False
+ self.extraSpecs = self._get_job_extra_specs(configurationFile)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"FastPolicy is: %(fastPolicy)s "
"CompositeType is: %(compositeType)s "
"MemberCount is: %(memberCount)s.",
- {'pool': extraSpecs[POOL],
- 'array': extraSpecs[ARRAY],
- 'fastPolicy': extraSpecs[FASTPOLICY],
- 'compositeType': extraSpecs[COMPOSITETYPE],
- 'memberCount': extraSpecs[MEMBERCOUNT]})
- return extraSpecs
-
- def _set_v3_extra_specs(self, extraSpecs, configurationFile, arrayName):
+ {'pool': self.extraSpecs[POOL],
+ 'array': self.extraSpecs[ARRAY],
+ 'fastPolicy': self.extraSpecs[FASTPOLICY],
+ 'compositeType': self.extraSpecs[COMPOSITETYPE],
+ 'memberCount': self.extraSpecs[MEMBERCOUNT]})
+ return self.extraSpecs
+
+ def _set_v3_extra_specs(self, configurationFile, arrayName):
"""Set the VMAX V3 extra specs.
If SLO or workload are not specified then the default
:param arrayName: the array serial number
:returns: extraSpecs (out)
"""
- extraSpecs[SLO] = self.utils.parse_slo_from_file(configurationFile)
- extraSpecs[WORKLOAD] = self.utils.parse_workload_from_file(
+ self.extraSpecs[SLO] = self.utils.parse_slo_from_file(
+ configurationFile)
+ self.extraSpecs[WORKLOAD] = self.utils.parse_workload_from_file(
configurationFile)
- extraSpecs[POOL] = self.utils.parse_pool_name_from_file(
+ self.extraSpecs[POOL] = self.utils.parse_pool_name_from_file(
configurationFile)
- extraSpecs[ARRAY] = arrayName
- extraSpecs[ISV3] = True
+ self.extraSpecs[ARRAY] = arrayName
+ self.extraSpecs[ISV3] = True
+ self.extraSpecs = self._get_job_extra_specs(configurationFile)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"SLO is: %(slo)s "
"Workload is: %(workload)s.",
- {'pool': extraSpecs[POOL],
- 'array': extraSpecs[ARRAY],
- 'slo': extraSpecs[SLO],
- 'workload': extraSpecs[WORKLOAD]})
- return extraSpecs
+ {'pool': self.extraSpecs[POOL],
+ 'array': self.extraSpecs[ARRAY],
+ 'slo': self.extraSpecs[SLO],
+ 'workload': self.extraSpecs[WORKLOAD]})
+ return self.extraSpecs
+
+ def _get_job_extra_specs(self, configurationFile):
+ """Get user defined extra specs around job intervals and retries.
+
+ :param configurationFile: the EMC configuration file
+ :param arrayName: the array serial number
+ :returns: extraSpecs (out)
+ """
+ intervalInSecs = self.utils.parse_interval_from_file(
+ configurationFile)
+ if intervalInSecs is not None:
+ LOG.debug("The user defined interval is : %(intervalInSecs)s.",
+ {'intervalInSecs': intervalInSecs})
+ self.extraSpecs[INTERVAL] = intervalInSecs
+
+ retries = self.utils.parse_retries_from_file(
+ configurationFile)
+ if retries is not None:
+ LOG.debug("The user defined retries is : %(retries)s.",
+ {'retries': retries})
+ self.extraSpecs[RETRIES] = retries
+
+ return self.extraSpecs
def _delete_from_pool(self, storageConfigService, volumeInstance,
volumeName, deviceId, fastPolicyName):
defaultStorageGroupInstanceName = (
self.masking.remove_device_from_default_storage_group(
self.conn, controllerConfigurationService,
- volumeInstance.path, volumeName, fastPolicyName))
+ volumeInstance.path, volumeName, fastPolicyName,
+ self.extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.warn(_LW(
"The volume: %(volumename)s. was not first part of the "
try:
rc = self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
- volumeName)
+ volumeName, self.extraSpecs)
except Exception as e:
# If we cannot successfully delete the volume then we want to
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService,
- volumeInstance, volumeName, fastPolicyName))
+ volumeInstance, volumeName, fastPolicyName,
+ self.extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to Roll back to re-add volume %(volumeName)s "
# Extra logic for the case when the volume is the last member.
sgFromVolInstanceName = self.masking.remove_and_reset_members(
self.conn, controllerConfigurationService, volumeInstance,
- None, volumeName, True, None, 'noReset')
+ volumeName, self.extraSpecs, None, 'noReset')
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigServic: %(service)s TheElement: %(vol_instance)s "
try:
rc = self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
- volumeName)
+ volumeName, self.extraSpecs)
except Exception as e:
# If we cannot successfully delete the volume, then we want to
self.masking.add_volume_to_storage_group(
self.conn, controllerConfigurationService,
storageGroupInstanceName, volumeInstance, volumeName,
- storageGroupName)
+ storageGroupName, self.extraSpecs)
LOG.error(_LE("Exception: %s."), e)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
return rc
def _create_clone_v2(self, repServiceInstanceName, cloneVolume,
- sourceVolume, sourceInstance, isSnapshot,
- extraSpecs):
+ sourceVolume, sourceInstance, isSnapshot):
"""Create a clone (v2).
:param repServiceInstanceName: the replication service
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: check to see if it is a snapshot
- :param fastPolicyName: the FAST policy name(if it exists)
:returns: rc
"""
# Check if the source volume contains any meta devices.
if metaHeadInstanceName is None: # Simple volume.
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, None, isSnapshot, extraSpecs)
+ sourceInstance, None, isSnapshot)
else: # Composite volume with meta device members.
# Check the meta member capacities.
metaMemberInstanceNames = (
LOG.debug("Meta volume all of the same size.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, None, isSnapshot, extraSpecs)
+ sourceInstance, None, isSnapshot)
LOG.debug("Meta volumes are of different sizes, "
"%d different sizes.", len(set(volumeCapacities)))
volumeSizeInbits))}
rc, baseVolumeDict, storageSystemName = (
self._create_composite_volume(
- volume, extraSpecs,
- baseVolumeName, volumeSizeInbits))
+ volume, baseVolumeName, volumeSizeInbits))
baseTargetVolumeInstance = self.utils.find_volume_instance(
self.conn, baseVolumeDict, baseVolumeName)
LOG.debug("Base target volume %(targetVol)s created. "
self.utils.find_element_composition_service(
self.conn, storageSystemName))
compositeType = self.utils.get_composite_type(
- extraSpecs[COMPOSITETYPE])
+ self.extraSpecs[COMPOSITETYPE])
rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn,
LOG.debug("Create V2 replica for meta members of different sizes.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, baseTargetVolumeInstance, isSnapshot,
- extraSpecs)
+ sourceInstance, baseTargetVolumeInstance, isSnapshot)
def _create_v2_replica_and_delete_clone_relationship(
self, repServiceInstanceName, cloneVolume, sourceVolume,
- sourceInstance, targetInstance, isSnapshot, extraSpecs):
+ sourceInstance, targetInstance, isSnapshot=False):
"""Create clone and delete relationship (v2).
:param repServiceInstanceName: the replication service
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param isSnapshot: check to see if it is a snapshot
- :param extraSpecs: additional information
:returns: rc
"""
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
rc, job = self.provision.create_element_replica(
self.conn, repServiceInstanceName, cloneName, sourceName,
- sourceInstance, targetInstance)
+ sourceInstance, targetInstance, self.extraSpecs)
cloneDict = self.provision.get_volume_dict_from_job(
self.conn, job['Job'])
- fastPolicyName = extraSpecs[FASTPOLICY]
+ fastPolicyName = self.extraSpecs[FASTPOLICY]
if isSnapshot:
if fastPolicyName is not None:
storageSystemName = sourceInstance['SystemName']
# Remove the Clone relationship so it can be used as a regular lun.
# 8 - Detach operation.
rc, job = self.provision.delete_clone_relationship(
- self.conn, repServiceInstanceName, syncInstanceName)
+ self.conn, repServiceInstanceName, syncInstanceName,
+ self.extraSpecs)
if fastPolicyName is not None:
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName)
syncType = self.utils.get_num(8, '16') # Default syncType 8: clone.
# Create target volume
- extraSpecs = self._initial_setup(cloneVolume)
+ self.extraSpecs = self._initial_setup(cloneVolume)
numOfBlocks = sourceInstance['NumberOfBlocks']
blockSize = sourceInstance['BlockSize']
int(self.utils.convert_bits_to_gbs(volumeSizeInbits))}
_, volumeDict, storageSystemName = (
self._create_v3_volume(
- volume, extraSpecs, cloneName, volumeSizeInbits))
+ volume, cloneName, volumeSizeInbits))
targetInstance = self.utils.find_volume_instance(
self.conn, volumeDict, cloneName)
LOG.debug("Create replica target volume "
_, job = (
self.provisionv3.create_element_replica(
self.conn, repServiceInstanceName, cloneName, syncType,
- sourceInstance, targetInstance))
+ sourceInstance, self.extraSpecs, targetInstance))
cloneDict = self.provisionv3.get_volume_dict_from_job(
self.conn, job['Job'])
rc, job = self.provisionv3.break_replication_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
- operation)
+ operation, self.extraSpecs)
return rc, cloneDict
def _delete_cg_and_members(
- self, storageSystem, extraSpecs, cgName, modelUpdate, volumes):
+ self, storageSystem, cgName, modelUpdate, volumes):
"""Helper function to delete a consistency group and its member volumes.
:param storageSystem: storage system
cgInstanceName)
self.provision.delete_consistency_group(
- self.conn, replicationService, cgInstanceName, cgName)
+ self.conn, replicationService, cgInstanceName, cgName,
+ self.extraSpecs)
if memberInstanceNames:
try:
{'cg': cgInstanceName,
'numVols': len(memberInstanceNames),
'memVols': memberInstanceNames})
- if extraSpecs[ISV3]:
+ if self.extraSpecs[ISV3]:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None)
+ memberInstanceNames, None, self.extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
- memberInstanceNames, None)
+ memberInstanceNames, None, self.extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception:
def add_volume_to_default_storage_group_for_fast_policy(
self, conn, controllerConfigService, volumeInstance,
- volumeName, fastPolicyName):
+ volumeName, fastPolicyName, extraSpecs):
"""Add a volume to the default storage group for FAST policy.
The storage group must pre-exist. Once added to the storage group,
:param volumeInstance: the volume instance
:param volumeName: the volume name (String)
:param fastPolicyName: the fast policy name (String)
+ :param extraSpecs: additional info
:returns: assocStorageGroupInstanceName - the storage group
associated with the volume
"""
self.provision.add_members_to_masking_group(
conn, controllerConfigService, storageGroupInstanceName,
- volumeInstance.path, volumeName)
+ volumeInstance.path, volumeName, extraSpecs)
# Check to see if the volume is in the storage group.
assocStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(conn,
def _create_default_storage_group(self, conn, controllerConfigService,
fastPolicyName, storageGroupName,
- volumeInstance):
+ volumeInstance, extraSpecs):
"""Create a first volume for the storage group.
This is necessary because you cannot remove a volume if it is the
:param fastPolicyName: the fast policy name (String)
:param storageGroupName: the storage group name (String)
:param volumeInstance: the volume instance
+ :param extraSpecs: additional info
:returns: defaultstorageGroupInstanceName - instance name of the
default storage group
"""
failedRet = None
firstVolumeInstance = self._create_volume_for_default_volume_group(
- conn, controllerConfigService, volumeInstance.path)
+ conn, controllerConfigService, volumeInstance.path, extraSpecs)
if firstVolumeInstance is None:
LOG.error(_LE(
"Failed to create a first volume for storage "
defaultStorageGroupInstanceName = (
self.provision.create_and_get_storage_group(
conn, controllerConfigService, storageGroupName,
- firstVolumeInstance.path))
+ firstVolumeInstance.path, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Failed to create default storage group for "
self.add_storage_group_to_tier_policy_rule(
conn, tierPolicyServiceInstanceName,
defaultStorageGroupInstanceName, tierPolicyRuleInstanceName,
- storageGroupName, fastPolicyName)
+ storageGroupName, fastPolicyName, extraSpecs)
return defaultStorageGroupInstanceName
def _create_volume_for_default_volume_group(
- self, conn, controllerConfigService, volumeInstanceName):
+ self, conn, controllerConfigService, volumeInstanceName,
+ extraSpecs):
"""Creates a volume for the default storage group for a fast policy.
Creates a small first volume for the default storage group for a
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param volumeInstanceName: the volume instance name
+ :param extraSpecs: additional info
:returns: firstVolumeInstanceName - instance name of the first volume
in the storage group
"""
volumeDict, _ = (
self.provision.create_volume_from_pool(
conn, storageConfigurationInstanceName, volumeName,
- poolInstanceName, volumeSize))
+ poolInstanceName, volumeSize, extraSpecs))
firstVolumeInstanceName = self.utils.find_volume_instance(
conn, volumeDict, volumeName)
return firstVolumeInstanceName
def add_storage_group_to_tier_policy_rule(
self, conn, tierPolicyServiceInstanceName,
storageGroupInstanceName, tierPolicyRuleInstanceName,
- storageGroupName, fastPolicyName):
+ storageGroupName, fastPolicyName, extraSpecs):
"""Add the storage group to the tier policy rule.
:param conn: the connection information to the ecom server
:param tierPolicyRuleInstanceName: tier policy instance name
:param storageGroupName: the storage group name (String)
:param fastPolicyName: the fast policy name (String)
+ :param extraSpecs: additional info
"""
# 5 is ("Add InElements to Policy").
modificationType = '5'
Operation=self.utils.get_num(modificationType, '16'),
InElements=[storageGroupInstanceName])
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error associating storage group : %(storageGroupName)s. "
def add_storage_group_and_verify_tier_policy_assoc(
self, conn, controllerConfigService, storageGroupInstanceName,
- storageGroupName, fastPolicyName):
+ storageGroupName, fastPolicyName, extraSpecs):
"""Adds a storage group to a tier policy and verifies success.
Add a storage group to a tier policy rule and verify that it was
:param storageGroupInstanceName: the storage group instance name
:param storageGroupName: the storage group name (String)
:param fastPolicyName: the fast policy name (String)
+ :param extraSpecs: additional info
:returns: assocTierPolicyInstanceName
"""
failedRet = None
self.add_storage_group_to_tier_policy_rule(
conn, tierPolicyServiceInstanceName,
storageGroupInstanceName, tierPolicyRuleInstanceName,
- storageGroupName, fastPolicyName)
+ storageGroupName, fastPolicyName, extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
LOG.error(_LE(
def delete_storage_group_from_tier_policy_rule(
self, conn, tierPolicyServiceInstanceName,
- storageGroupInstanceName, tierPolicyRuleInstanceName):
+ storageGroupInstanceName, tierPolicyRuleInstanceName,
+ extraSpecs):
"""Disassociate the storage group from its tier policy rule.
:param conn: connection the ecom server
:param storageGroupInstanceName: instance name of the storage group
:param tierPolicyRuleInstanceName: instance name of the tier policy
associated with the storage group
+ :param extraSpecs: additional information
"""
modificationType = '6'
LOG.debug("Invoking ModifyStorageTierPolicyRule %s.",
Operation=self.utils.get_num(modificationType, '16'),
InElements=[storageGroupInstanceName])
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
LOG.error(_LE("Error disassociating storage group from "
"policy: %s."), errordesc)
def get_or_create_default_storage_group(
self, conn, controllerConfigService, fastPolicyName,
- volumeInstance):
+ volumeInstance, extraSpecs):
"""Create or get a default storage group for FAST policy.
:param conn: the ecom connection
:param controllerConfigService: the controller configuration service
:param fastPolicyName: the fast policy name (String)
:param volumeInstance: the volume instance
+ :param extraSpecs: additional info
:returns: defaultStorageGroupInstanceName - the default storage group
instance name
"""
controllerConfigService,
fastPolicyName,
defaultSgGroupName,
- volumeInstance))
+ volumeInstance,
+ extraSpecs))
return defaultStorageGroupInstanceName
FC = 'fc'
EMC_ROOT = 'root/emc'
+FASTPOLICY = 'storagetype:fastpolicy'
+ISV3 = 'isV3'
class EMCVMAXMasking(object):
self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl)
- def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict):
+ def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict,
+ extraSpecs):
"""Get or Create a masking view and add a volume to the storage group.
Given a masking view tuple either get or create a masking view and add
:param conn: the connection to ecom
:param maskingViewDict: the masking view tuple
+ :param extraSpecs: additional info
:returns: dict rollbackDict
"""
rollbackDict = {}
volumeName = maskingViewDict['volumeName']
isV3 = maskingViewDict['isV3']
isLiveMigration = maskingViewDict['isLiveMigration']
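+ # Thread extraSpecs through maskingViewDict so nested helpers can use it.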
+ maskingViewDict['extraSpecs'] = extraSpecs
defaultStorageGroupInstanceName = None
fastPolicyName = None
assocStorageGroupName = None
self._get_and_remove_from_storage_group_v2(
conn, controllerConfigService,
volumeInstance.path,
- volumeName, fastPolicyName))
+ volumeName, fastPolicyName,
+ extraSpecs))
# Validate new or existing masking view.
# Return the storage group so we can add the volume to it.
rollbackDict['volumeName'] = volumeName
rollbackDict['fastPolicyName'] = fastPolicyName
rollbackDict['isV3'] = isV3
+ rollbackDict['extraSpecs'] = extraSpecs
if errorMessage:
# Rollback code if we cannot complete any of the steps above
controllerConfigService = maskingViewDict['controllerConfigService']
sgGroupName = maskingViewDict['sgGroupName']
volumeInstance = maskingViewDict['volumeInstance']
- storageSystemName = maskingViewDict['storageSystemName']
volumeName = maskingViewDict['volumeName']
msg = None
if self._is_volume_in_storage_group(
self.add_volume_to_storage_group(
conn, controllerConfigService,
storageGroupInstanceName, volumeInstance, volumeName,
- sgGroupName, storageSystemName)
+ sgGroupName, maskingViewDict['extraSpecs'])
if not self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance):
def _get_and_remove_from_storage_group_v2(
self, conn, controllerConfigService, volumeInstanceName,
- volumeName, fastPolicyName):
+ volumeName, fastPolicyName, extraSpecs):
"""Get the storage group and remove volume from it.
:param controllerConfigService - controller configuration service
:param volumeInstanceName - volume instance name
:param volumeName - volume name
:param fastPolicyName - fast name
+ :param extraSpecs: additional info
"""
defaultStorageGroupInstanceName = (
self.fast.get_and_verify_default_storage_group(
retStorageGroupInstanceName = (
self.remove_device_from_default_storage_group(
conn, controllerConfigService, volumeInstanceName,
- volumeName, fastPolicyName))
+ volumeName, fastPolicyName, extraSpecs))
if retStorageGroupInstanceName is None:
exceptionMessage = (_(
"Failed to remove volume %(volumeName)s from default SG: "
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
- volumeInstanceName, volumeName)
+ volumeInstanceName, volumeName, maskingViewDict['extraSpecs'])
assocVolumeInstanceNames = self.get_devices_from_storage_group(
conn, storageGroupInstanceName)
foundStorageGroupInstanceName = (
self.provisionv3.create_storage_group_v3(
conn, controllerConfigService, storageGroupName,
- pool, slo, workload))
+ pool, slo, workload, maskingViewDict['extraSpecs']))
else:
fastPolicyName = maskingViewDict['fastPolicy']
volumeInstance = maskingViewDict['volumeInstance']
foundStorageGroupInstanceName = (
self.provision.create_and_get_storage_group(
conn, controllerConfigService, storageGroupName,
- volumeInstance.path))
+ volumeInstance.path, maskingViewDict['extraSpecs']))
if (fastPolicyName is not None and
defaultStorageGroupInstanceName is not None):
assocTierPolicyInstanceName = (
self.fast.add_storage_group_and_verify_tier_policy_assoc(
conn, controllerConfigService,
foundStorageGroupInstanceName,
- storageGroupName, fastPolicyName))
+ storageGroupName, fastPolicyName,
+ maskingViewDict['extraSpecs']))
if assocTierPolicyInstanceName is None:
LOG.error(_LE(
"Cannot add and verify tier policy association for "
rollbackDict['controllerConfigService'],
rollbackDict['volumeInstance'],
rollbackDict['volumeName'],
- rollbackDict['fastPolicyName']))
+ rollbackDict['fastPolicyName'],
+ rollbackDict['extraSpecs']))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to Roll back to re-add volume "
rollbackDict['controllerConfigService'],
rollbackDict['volumeInstance'],
rollbackDict['fastPolicyName'],
- rollbackDict['volumeName'], False)
+ rollbackDict['volumeName'], rollbackDict['extraSpecs'],
+ False)
except Exception as e:
LOG.error(_LE("Exception: %s."), e)
errorMessage = (_(
def add_volume_to_storage_group(
self, conn, controllerConfigService, storageGroupInstanceName,
- volumeInstance, volumeName, sgGroupName, storageSystemName=None):
+ volumeInstance, volumeName, sgGroupName, extraSpecs):
"""Add a volume to an existing storage group.
:param conn: connection to ecom server
:param volumeInstance: the volume instance
:param volumeName: the name of the volume (String)
:param sgGroupName: the name of the storage group (String)
- :param storageSystemName: the storage system name (Optional Parameter),
- if None plain operation assumed
+ :param extraSpecs: additional info
:returns: int rc the return code of the job
:returns: dict the job dict
"""
self.provision.add_members_to_masking_group(
conn, controllerConfigService, storageGroupInstanceName,
- volumeInstance.path, volumeName)
+ volumeInstance.path, volumeName, extraSpecs)
LOG.info(_LI(
"Added volume: %(volumeName)s to existing storage group "
def remove_device_from_default_storage_group(
self, conn, controllerConfigService, volumeInstanceName,
- volumeName, fastPolicyName):
+ volumeName, fastPolicyName, extraSpecs):
"""Remove the volume from the default storage group.
Remove the volume from the default storage group for the FAST
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param fastPolicyName: the fast policy name (String)
+ :param extraSpecs: additional info
:returns: instance name defaultStorageGroupInstanceName
"""
failedRet = None
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, defaultStorageGroupInstanceName,
- volumeInstanceName, volumeName)
+ volumeInstanceName, volumeName, extraSpecs)
assocVolumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
def remove_and_reset_members(
self, conn, controllerConfigService, volumeInstance,
- fastPolicyName, volumeName, isV3, connector=None, noReset=None):
+ volumeName, extraSpecs, connector=None, noReset=None):
"""Part of unmap device or rollback.
Removes volume from the Device Masking Group that belongs to a
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param volumeInstance: the volume Instance
- :param fastPolicyName: the fast policy name (if it exists)
:param volumeName: the volume name
- :param isV3: is array v2 or v3
:param connector: optional
:param noReset: optional, if none, then reset
:returns: maskingGroupInstanceName
"""
+ isV3 = extraSpecs[ISV3]
+ fastPolicyName = extraSpecs.get(FASTPOLICY, None)
+
storageGroupInstanceName = None
if connector is not None:
storageGroupInstanceName = self._get_sg_associated_with_connector(
isTieringPolicySupported,
tierPolicyServiceInstanceName,
storageSystemInstanceName['Name'],
- storageGroupInstanceName)
+ storageGroupInstanceName, extraSpecs)
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
- volumeInstance.path, volumeName)
+ volumeInstance.path, volumeName, extraSpecs)
LOG.debug(
"Remove the last volume %(volumeName)s completed "
if noReset is None:
self._return_volume_to_default_storage_group_v3(
conn, controllerConfigService, storageGroupName,
- volumeInstance, volumeName, storageSystemInstanceName)
+ volumeInstance, volumeName, storageSystemInstanceName,
+ extraSpecs)
else:
if isTieringPolicySupported:
self._cleanup_tiering(
conn, controllerConfigService, fastPolicyName,
- volumeInstance, volumeName)
+ volumeInstance, volumeName, extraSpecs)
else:
# Not the last volume so remove it from storage group in
# the masking view.
"%(numVol)d.", {'numVol': len(volumeInstanceNames)})
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
- volumeInstance.path, volumeName)
+ volumeInstance.path, volumeName, extraSpecs)
LOG.debug(
"RemoveMembers for volume %(volumeName)s completed "
if isV3:
self._return_volume_to_default_storage_group_v3(
conn, controllerConfigService, storageGroupName,
- volumeInstance, volumeName, storageSystemInstanceName)
+ volumeInstance, volumeName, storageSystemInstanceName,
+ extraSpecs)
else:
# V2 if FAST POLICY enabled, move the volume to the default
# SG.
if fastPolicyName is not None and isTieringPolicySupported:
self._cleanup_tiering(
conn, controllerConfigService, fastPolicyName,
- volumeInstance, volumeName)
+ volumeInstance, volumeName, extraSpecs)
volumeInstanceNames = self.get_devices_from_storage_group(
conn, storageGroupInstanceName)
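
The hunks above replace the explicit fastPolicyName/isV3 arguments with a single extraSpecs dict, read through the module-level ISV3 and FASTPOLICY keys (defined elsewhere in the driver, not in this hunk). A minimal sketch of the resulting dispatch; the key values here are assumptions for illustration only:

# Minimal sketch of the extraSpecs-driven dispatch above. The key
# values are assumed; only the names ISV3 and FASTPOLICY appear in
# this hunk.
ISV3 = 'isV3'
FASTPOLICY = 'storagetype:fastpolicy'

def choose_cleanup_path(extraSpecs):
    isV3 = extraSpecs[ISV3]
    fastPolicyName = extraSpecs.get(FASTPOLICY, None)
    if isV3:
        return 'v3: return volume to default storage group'
    if fastPolicyName is not None:
        return 'v2: clean up FAST tiering for %s' % fastPolicyName
    return 'v2: plain removal, no FAST policy'

print(choose_cleanup_path({ISV3: False, FASTPOLICY: 'GOLD'}))
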
def _get_and_remove_rule_association(
self, conn, fastPolicyName, isTieringPolicySupported,
tierPolicyServiceInstanceName, storageSystemName,
- storageGroupInstanceName):
+ storageGroupInstanceName, extraSpecs):
"""Remove the storage group from the policy rule.
:param conn: the ecom connection
:param tierPolicyServiceInstanceName: the tier policy instance name
:param storageSystemName: storage system name
:param storageGroupInstanceName: the storage group instance name
+ :param extraSpecs: additional info
"""
# Disassociate storage group from FAST policy.
if fastPolicyName is not None and isTieringPolicySupported is True:
self.fast.delete_storage_group_from_tier_policy_rule(
conn, tierPolicyServiceInstanceName,
- storageGroupInstanceName, tierPolicyInstanceName)
+ storageGroupInstanceName, tierPolicyInstanceName, extraSpecs)
def _return_volume_to_default_storage_group_v3(
self, conn, controllerConfigService, storageGroupName,
- volumeInstance, volumeName, storageSystemInstanceName):
+ volumeInstance, volumeName, storageSystemInstanceName,
+ extraSpecs):
"""Return volume to the default storage group in v3.
:param conn: the ecom connection
:param volumeInstance: volumeInstance
:param volumeName: the volume name
:param storageSystemInstanceName: the storage system instance name
+ :param extraSpecs: additional info
"""
# First strip the shortHostname from the storage group name.
defaultStorageGroupName, shorthostName = (
def _cleanup_tiering(
self, conn, controllerConfigService, fastPolicyName,
- volumeInstance, volumeName):
+ volumeInstance, volumeName, extraSpecs):
"""Clea nup tiering.
:param conn: the ecom connection
:param fastPolicyName: the fast policy name
:param volumeInstance: volume instance
:param volumeName: the volume name
+ :param extraSpecs: additional info
"""
defaultStorageGroupInstanceName = (
self.fast.get_policy_default_storage_group(
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigService, volumeInstance, volumeName,
- fastPolicyName))
+ fastPolicyName, extraSpecs))
# Check default storage group number of volumes.
volumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
def delete_volume_from_pool(
- self, conn, storageConfigservice, volumeInstanceName, volumeName):
+ self, conn, storageConfigservice, volumeInstanceName, volumeName,
+ extraSpecs):
"""Given the volume instance remove it from the pool.
:param conn: connection the the ecom server
:param storageConfigservice: volume created from job
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
- :param
+ :param extraSpecs: additional info
:returns: rc -- return code
"""
startTime = time.time()
TheElements=theElements)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
def create_volume_from_pool(
self, conn, storageConfigService, volumeName,
- poolInstanceName, volumeSize):
+ poolInstanceName, volumeSize, extraSpecs):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance name to create
the dummy volume in
:param volumeSize: volume size (String)
+ :param extraSpecs: additional info
:returns: volumeDict - the volume dict
"""
startTime = time.time()
'rc': rc})
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
return volumeDict, rc
def create_and_get_storage_group(self, conn, controllerConfigService,
- storageGroupName, volumeInstanceName):
+ storageGroupName, volumeInstanceName,
+ extraSpecs):
"""Create a storage group and return it.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param storageGroupName: the storage group name (String)
:param volumeInstanceName: the volume instance name
+ :param extraSpecs: additional info
:returns: foundStorageGroupInstanceName - instance name of the
default storage group
"""
Members=[volumeInstanceName])
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
return foundStorageGroupInstanceName
def create_storage_group_no_members(
- self, conn, controllerConfigService, groupName):
+ self, conn, controllerConfigService, groupName, extraSpecs):
"""Create a new storage group that has no members.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the proposed group name
+ :param extraSpecs: additional info
:returns: foundStorageGroupInstanceName - the instance Name of
the storage group
"""
DeleteWhenBecomesUnassociated=False)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
def remove_device_from_storage_group(
self, conn, controllerConfigService, storageGroupInstanceName,
- volumeInstanceName, volumeName):
+ volumeInstanceName, volumeName, extraSpecs):
"""Remove a volume from a storage group.
:param conn: the connection to the ecom server
:param storageGroupInstanceName: the instance name of the storage group
:param volumeInstanceName: the instance name of the volume
:param volumeName: the volume name (String)
+ :param extraSpecs: additional info
:returns: rc - the return code of the job
"""
startTime = time.time()
MaskingGroup=storageGroupInstanceName,
Members=[volumeInstanceName])
if rc != 0L:
- rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict)
+ rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error removing volume %(vol)s. %(error)s.")
def add_members_to_masking_group(
self, conn, controllerConfigService, storageGroupInstanceName,
- volumeInstanceName, volumeName):
+ volumeInstanceName, volumeName, extraSpecs):
"""Add a member to a masking group group.
:param conn: the connection to the ecom server
:param storageGroupInstanceName: the instance name of the storage group
:param volumeInstanceName: the instance name of the volume
:param volumeName: the volume name (String)
+ :param extraSpecs: additional info
"""
startTime = time.time()
Members=[volumeInstanceName])
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error mapping volume %(vol)s. %(error)s.")
def unbind_volume_from_storage_pool(
self, conn, storageConfigService, poolInstanceName,
- volumeInstanceName, volumeName):
+ volumeInstanceName, volumeName, extraSpecs):
"""Unbind a volume from a pool and return the unbound volume.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance name
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name
+ :param extraSpecs: additional info
:returns: unboundVolumeInstance - the unbound volume instance
"""
startTime = time.time()
TheElement=volumeInstanceName)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error unbinding volume %(vol)s from pool. %(error)s.")
def modify_composite_volume(
self, conn, elementCompositionService, theVolumeInstanceName,
- inVolumeInstanceName):
+ inVolumeInstanceName, extraSpecs):
"""Given a composite volume add a storage volume to it.
:param theVolumeInstanceName: the existing composite volume
:param inVolumeInstanceName: the volume you wish to add to the
composite volume
+ :param extraSpecs: additional info
:returns: rc - return code
:returns: job - job
"""
InElements=[inVolumeInstanceName])
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error adding volume to composite volume. "
def create_composite_volume(
self, conn, elementCompositionService, volumeSize, volumeName,
- poolInstanceName, compositeType, numMembers):
+ poolInstanceName, compositeType, numMembers, extraSpecs):
"""Create a new volume using the auto meta feature.
:param conn: the connection to the ecom server
e.g. striped/concatenated
:param numMembers: the number of meta members to make up the composite.
If it is 1 then a non composite is created
+ :param extraSpecs: additional info
:returns: rc
:returns: errordesc
"""
EMCNumberOfMembers=self.utils.get_num(numMembers, '32'))
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Volume: %(volumename)s. "
def create_new_composite_volume(
self, conn, elementCompositionService, compositeHeadInstanceName,
- compositeMemberInstanceName, compositeType):
+ compositeMemberInstanceName, compositeType, extraSpecs):
"""Creates a new composite volume.
Given a bound composite head and an unbound composite member
:param compositeMemberInstanceName: the composite member.
This must be unbound
:param compositeType: the composite type e.g. striped or concatenated
+ :param extraSpecs: additional info
:returns: rc - return code
:returns: errordesc - descriptions of the error
"""
CompositeType=self.utils.get_num(compositeType, '16'))
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Creating new composite Volume Return code: "
def _migrate_volume(
self, conn, storageRelocationServiceInstanceName,
- volumeInstanceName, targetPoolInstanceName):
+ volumeInstanceName, targetPoolInstanceName, extraSpecs):
"""Migrate a volume to another pool.
:param conn: the connection to the ecom server
service
:param volumeInstanceName: the volume to be migrated
:param targetPoolInstanceName: the target pool to migrate the volume to
+ :param extraSpecs: additional info
:returns: rc - return code
"""
startTime = time.time()
TargetPool=targetPoolInstanceName)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Migrating volume from one pool to another. "
def migrate_volume_to_storage_pool(
self, conn, storageRelocationServiceInstanceName,
- volumeInstanceName, targetPoolInstanceName):
+ volumeInstanceName, targetPoolInstanceName, extraSpecs):
"""Given the storage system name, get the storage relocation service.
:param conn: the connection to the ecom server
:param volumeInstanceName: the volume to be migrated
:param targetPoolInstanceName: the target pool to migrate the
volume to.
+ :param extraSpecs: additional info
:returns: rc
"""
LOG.debug(
try:
rc = self._migrate_volume(
conn, storageRelocationServiceInstanceName,
- volumeInstanceName, targetPoolInstanceName)
+ volumeInstanceName, targetPoolInstanceName, extraSpecs)
except Exception as ex:
if 'source of a migration session' in six.text_type(ex):
try:
rc = self._terminate_migrate_session(
- conn, volumeInstanceName)
+ conn, volumeInstanceName, extraSpecs)
except Exception as ex:
LOG.error(_LE('Exception: %s.'), ex)
exceptionMessage = (_(
try:
rc = self._migrate_volume(
conn, storageRelocationServiceInstanceName,
- volumeInstanceName, targetPoolInstanceName)
+ volumeInstanceName, targetPoolInstanceName,
+ extraSpecs)
except Exception as ex:
LOG.error(_LE('Exception: %s'), ex)
exceptionMessage = (_(
return rc
- def _terminate_migrate_session(self, conn, volumeInstanceName):
+ def _terminate_migrate_session(self, conn, volumeInstanceName,
+ extraSpecs):
"""Given the volume instance terminate a migrate session.
:param conn: the connection to the ecom server
:param volumeInstanceName: the volume to be migrated
+ :param extraSpecs: additional info
:returns: rc
"""
startTime = time.time()
'RequestStateChange', volumeInstanceName,
RequestedState=self.utils.get_num(32769, '16'))
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Terminating migrate session. "
def create_element_replica(
self, conn, repServiceInstanceName, cloneName,
- sourceName, sourceInstance, targetInstance, copyOnWrite=False):
+ sourceName, sourceInstance, targetInstance, extraSpecs,
+ copyOnWrite=False):
"""Make SMI-S call to create replica for source element.
:param conn - the connection to the ecom server
:param cloneName - replica name
:param sourceName - source volume name
:param sourceInstance - source volume instance
+ :param extraSpecs: additional info
+ :param copyOnWrite: optional, defaults to False
:returns: rc - return code
:returns: job - job object of the replica creation operation
ReplicationType=self.utils.get_num(10, '16'))
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, rsd)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, rsd,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error creating cloned volume using "
TargetElement=targetInstance.path)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Cloned Volume: "
return rc, job
def delete_clone_relationship(
- self, conn, repServiceInstanceName, syncInstanceName, force=False):
+ self, conn, repServiceInstanceName, syncInstanceName, extraSpecs,
+ force=False):
"""Deletes the relationship between the clone and source volume.
Makes an SMI-S call to break clone relationship between the clone
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
+ :param extraSpecs - additional info
+ :param force - optional, defaults to False
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
'rc': rc})
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error break clone relationship: "
return rc, targetEndpoints
def create_consistency_group(
- self, conn, replicationService, consistencyGroupName):
+ self, conn, replicationService, consistencyGroupName, extraSpecs):
"""Create a new consistency group.
:param conn: the connection to the ecom server
:param replicationService: the replication Service
:param consistencyGroupName: the CG group name
+ :param extraSpecs - additional info
:returns: rc
:returns: job
"""
GroupName=consistencyGroupName)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Failed to create consistency group: "
def delete_consistency_group(
self, conn, replicationService, cgInstanceName,
- consistencyGroupName):
+ consistencyGroupName, extraSpecs):
"""Delete a consistency group.
:param replicationService: the replication Service
:param cgInstanceName: the CG instance name
:param consistencyGroupName: the CG group name
+ :param extraSpecs - additional info
:returns: rc
:returns: job
"""
RemoveElements=True)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Failed to delete consistency group: "
def add_volume_to_cg(
self, conn, replicationService, cgInstanceName,
- volumeInstanceName, cgName, volumeName):
+ volumeInstanceName, cgName, volumeName, extraSpecs):
"""Add a volume to a consistency group.
:param conn: the connection to the ecom server
:param cgInstanceName: the CG instance name
:param cgName: the CG group name
:param volumeName: the volume name
+ :param extraSpecs - additional info
:returns: rc
:returns: job
"""
ReplicationGroup=cgInstanceName)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Failed to add volume %(volumeName)s: "
def remove_volume_from_cg(
self, conn, replicationService, cgInstanceName,
- volumeInstanceName, cgName, volumeName):
+ volumeInstanceName, cgName, volumeName, extraSpecs):
"""Remove a volume from a consistency group.
:param conn: the connection to the ecom server
:param cgInstanceName: the CG instance name
:param cgName: the CG group name
:param volumeName: the volume name
+ :param extraSpecs - additional info
:returns: rc
:returns: job
"""
RemoveElements=True)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Failed to remove volume %(volumeName)s: "
def create_group_replica(
self, conn, replicationService,
- srcGroupInstanceName, tgtGroupInstanceName, relationName):
+ srcGroupInstanceName, tgtGroupInstanceName, relationName,
+ extraSpecs):
"""Make SMI-S call to create replica for source group.
:param conn - the connection to the ecom server
:param repServiceInstanceName - replication service
:param srcGroupInstanceName - source group instance name
:param tgtGroupInstanceName - target group instance name
- :param cgName - target group name
+ :param relationName - replica relationship name
+ :param extraSpecs - additional info
:returns: rc - return code
:returns: job - job object of the replica creation operation
SyncType=self.utils.get_num(8, '16'))
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
def delete_volume_from_pool(
- self, conn, storageConfigservice, volumeInstanceName, volumeName):
+ self, conn, storageConfigservice, volumeInstanceName, volumeName,
+ extraSpecs):
"""Given the volume instance remove it from the pool.
:param conn: connection the the ecom server
:param storageConfigservice: volume created from job
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
+ :param extraSpecs: additional info
:returns: rc -- return code
"""
startTime = time.time()
TheElements=theElements)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
def create_volume_from_sg(
self, conn, storageConfigService, volumeName,
- sgInstanceName, volumeSize):
+ sgInstanceName, volumeSize, extraSpecs):
"""Create the volume and associate it with a storage group.
We use EMCCollections parameter to supply a Device Masking Group
:param sgInstanceName: the storage group instance name
associated with an SLO
:param volumeSize: volume size (String)
+ :param extraSpecs: additional info
:returns: volumeDict - the volume dict
:returns: rc - return code
"""
'rc': rc})
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
def create_element_replica(
self, conn, repServiceInstanceName,
- cloneName, syncType, sourceInstance, targetInstance=None):
+ cloneName, syncType, sourceInstance, extraSpecs,
+ targetInstance=None):
"""Make SMI-S call to create replica for source element.
:param conn - the connection to the ecom server
:param cloneName - clone volume name
:param syncType - 7: snapshot, 8: clone
:param sourceInstance - source volume instance
+ :param extraSpecs: additional info
:param targetInstance - target volume instance
:returns: rc - return code
:returns: job - job object of the replica creation operation
TargetElement=targetInstance.path)
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error Create Cloned Volume: %(cloneName)s "
def break_replication_relationship(
self, conn, repServiceInstanceName, syncInstanceName,
- operation, force=False):
+ operation, extraSpecs, force=False):
"""Deletes the relationship between the clone/snap and source volume.
Makes an SMI-S call to break clone relationship between the clone
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
+ :param extraSpecs: additional info
:param force: force to break replication relationship if True
:returns: rc - return code
:returns: job - job object of the replica creation operation
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
- conn, repServiceInstanceName, syncInstanceName, operation, force)
+ conn, repServiceInstanceName, syncInstanceName, operation,
+ extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
- groupName, srp, slo, workload):
+ groupName, srp, slo, workload, extraSpecs):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
+ :param extraSpecs: additional info
:returns: storageGroupInstanceName - storage group instance name
"""
EMCWorkload=workload)
if rc != 0L:
- rc, errordesc = rc, errordesc = self.utils.wait_for_job_complete(
- conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
LOG.error(_LE(
"Error Create Group: %(groupName)s. "
def _get_supported_size_range_for_SLO(
self, conn, storageConfigService,
- srpPoolInstanceName, storagePoolSettingInstanceName):
+ srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
"""Gets available performance capacity per SLO.
:param conn: the connection information to the ecom server
:param srpPoolInstanceName: the SRP storage pool instance
:param storagePoolSettingInstanceName: the SLO type
e.g. Bronze
+ :param extraSpecs: additional info
:returns: supportedSizeDict - the supported size dict
"""
startTime = time.time()
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(
- conn, supportedSizeDict)
+ conn, supportedSizeDict, extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Cannot get supported size range for %(sps)s "
return supportedSizeDict
def get_volume_range(
- self, conn, storageConfigService, poolInstanceName, slo, workload):
+ self, conn, storageConfigService, poolInstanceName, slo, workload,
+ extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance
:param slo: slo string e.g. Bronze
:param workload: workload string e.g. DSS
+ :param extraSpecs: additional info
:returns: maximumVolumeSize - the maximum volume size supported
:returns: minimumVolumeSize - the minimum volume size supported
"""
if storagePoolCapabilityInstanceName:
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
- storagePoolSettingInstanceName)
+ storagePoolSettingInstanceName, extraSpecs)
maximumVolumeSize = supportedSizeDict['MaximumVolumeSize']
minimumVolumeSize = supportedSizeDict['MinimumVolumeSize']
return maximumVolumeSize, minimumVolumeSize
def activate_snap_relationship(
- self, conn, repServiceInstanceName, syncInstanceName):
+ self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
"""Activate snap relationship and start copy operation.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
+ :param extraSpecs: additional info
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
- conn, repServiceInstanceName, syncInstanceName, operation)
+ conn, repServiceInstanceName, syncInstanceName, operation,
+ extraSpecs)
def return_to_resource_pool(self, conn, repServiceInstanceName,
- syncInstanceName):
+ syncInstanceName, extraSpecs):
"""Return the snap target resources back to the pool.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
+ :param extraSpecs: additional info
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
- conn, repServiceInstanceName, syncInstanceName, operation)
+ conn, repServiceInstanceName, syncInstanceName, operation,
+ extraSpecs)
def _modify_replica_synchronization(
self, conn, repServiceInstanceName, syncInstanceName,
- operation, force=False):
+ operation, extraSpecs, force=False):
"""Modify the relationship between the clone/snap and source volume.
Helper function that makes an SMI-S call to break clone relationship
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
- :param operatoin: opeation code
+ :param operation: operation code
+ :param extraSpecs: additional info
:param force: force to modify replication synchronization if True
:returns: rc - return code
:returns: job - job object of the replica creation operation
{'sv': syncInstanceName, 'operation': operation, 'rc': rc})
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMessage = (_(
"Error modify replica synchronization: %(sv)s "
def create_group_replica(
self, conn, replicationService,
- srcGroupInstanceName, tgtGroupInstanceName, relationName):
+ srcGroupInstanceName, tgtGroupInstanceName, relationName,
+ extraSpecs):
"""Make SMI-S call to create replica for source group.
:param conn - the connection to the ecom server
:param srcGroupInstanceName - source group instance name
:param tgtGroupInstanceName - target group instance name
:param relationName - replica relationship name
+ :param extraSpecs: additional info
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
SyncType=self.utils.get_num(syncType, '16'))
if rc != 0L:
- rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+ rc, errordesc = self.utils.wait_for_job_complete(conn, job,
+ extraSpecs)
if rc != 0L:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
FC = 'fc'
JOB_RETRIES = 60
INTERVAL_10_SEC = 10
+INTERVAL = 'storagetype:interval'
+RETRIES = 'storagetype:retries'
CIM_ERR_NOT_FOUND = 6
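
The two new scoped keys let callers carry user-tunable polling values alongside the rest of the extra specs. An illustration (not from the patch) of the dict shape the utils methods below expect; the string values mimic what XML/extra-spec parsing would hand over:

# Illustrative only: the dict shape consumed by wait_for_job_complete
# and the _get_* helpers below.
INTERVAL = 'storagetype:interval'
RETRIES = 'storagetype:retries'

extraSpecs = {INTERVAL: '5', RETRIES: '40'}
print(int(extraSpecs[INTERVAL]), int(extraSpecs[RETRIES]))  # 5 40
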
return foundTierPolicyService
- def wait_for_job_complete(self, conn, job):
+ def wait_for_job_complete(self, conn, job, extraSpecs=None):
"""Given the job wait for it to complete.
:param conn: connection to the ecom server
+ :param extraSpecs: additional info (optional)
"""
jobInstanceName = job['Job']
- self._wait_for_job_complete(conn, job)
+ if extraSpecs and (INTERVAL in extraSpecs or RETRIES in extraSpecs):
+ self._wait_for_job_complete(conn, job, extraSpecs)
+ else:
+ self._wait_for_job_complete(conn, job)
jobinstance = conn.GetInstance(jobInstanceName,
LocalOnly=False)
rc = jobinstance['ErrorCode']
errorDesc = jobinstance['ErrorDescription']
- LOG.debug("Return code is: %(rc)lu "
+ LOG.debug("Return code is: %(rc)lu. "
"Error Description is: %(errorDesc)s.",
{'rc': rc,
'errorDesc': errorDesc})
return rc, errorDesc
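
wait_for_job_complete now takes extraSpecs as an optional trailing argument, so call sites that have not been updated keep their old behaviour. The guard only honours extraSpecs when it actually carries one of the tunable keys; a standalone restatement of that test:

# Standalone restatement of the guard in wait_for_job_complete.
INTERVAL = 'storagetype:interval'
RETRIES = 'storagetype:retries'

def uses_tuned_polling(extraSpecs=None):
    return bool(
        extraSpecs and (INTERVAL in extraSpecs or RETRIES in extraSpecs))

assert not uses_tuned_polling(None)
assert not uses_tuned_polling({'volume_backend_name': 'ISCSINoFAST'})
assert uses_tuned_polling({INTERVAL: '5'})
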
- def _wait_for_job_complete(self, conn, job):
+ def _wait_for_job_complete(self, conn, job, extraSpecs=None):
"""Given the job wait for it to complete.
- Called at an interval until the job is finished.
-
:param conn: connection to the ecom server
:param job: the job dict
+ :param extraSpecs: additional info (optional)
"""
def _wait_for_job_complete():
+ # Called at an interval until the job is finished.
+ maxJobRetries = self._get_max_job_retries(extraSpecs)
retries = kwargs['retries']
wait_for_job_called = kwargs['wait_for_job_called']
if self._is_job_finished(conn, job):
raise loopingcall.LoopingCallDone()
- if retries > JOB_RETRIES:
+ if retries > maxJobRetries:
LOG.error(_LE("_wait_for_job_complete "
"failed after %(retries)d "
"tries."),
if self._is_job_finished(conn, job):
kwargs['wait_for_job_called'] = True
except Exception as e:
- LOG.error(_LE("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s.") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
kwargs = {'retries': 0,
'wait_for_job_called': False}
+
+ intervalInSecs = self._get_interval_in_secs(extraSpecs)
+
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete)
- timer.start(interval=INTERVAL_10_SEC).wait()
+ timer.start(interval=intervalInSecs).wait()
+
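
The polling itself is unchanged: a FixedIntervalLoopingCall drives the inner _wait_for_job_complete until it raises LoopingCallDone, only now the interval comes from extraSpecs. A self-contained analogue of that loop, under the assumption that this captures the essentials of the real loopingcall module (it is not the real implementation):

# Self-contained analogue of the FixedIntervalLoopingCall pattern
# used above; illustrative only.
import time

class LoopingCallDone(Exception):
    """Raised by the callback to end the loop."""

def fixed_interval_loop(func, interval):
    # Call func every `interval` seconds until it signals completion.
    while True:
        try:
            func()
        except LoopingCallDone:
            return
        time.sleep(interval)

state = {'retries': 0}

def _poll():
    state['retries'] += 1
    if state['retries'] >= 3:  # stand-in for "job finished"
        raise LoopingCallDone()

fixed_interval_loop(_poll, interval=0.01)
print(state['retries'])  # 3
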
+ def _get_max_job_retries(self, extraSpecs):
+ """Get max job retries either default or user defined
+
+ :param extraSpecs: extraSpecs
+
+ :returns: JOB_RETRIES or user defined
+ """
+ if extraSpecs and RETRIES in extraSpecs:
+ jobRetries = extraSpecs[RETRIES]
+ else:
+ jobRetries = JOB_RETRIES
+ return int(jobRetries)
+
+ def _get_interval_in_secs(self, extraSpecs):
+ """Get interval in secs, either default or user defined
+
+ :param extraSpecs: extraSpecs
+
+ :returns: INTERVAL_10_SEC or user defined
+ """
+ if extraSpecs and INTERVAL in extraSpecs:
+ intervalInSecs = extraSpecs[INTERVAL]
+ else:
+ intervalInSecs = INTERVAL_10_SEC
+ return int(intervalInSecs)
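
Both getters fall back to the module defaults when extraSpecs is absent (or, with the guard above, when the key is missing) and cast to int, since the values arrive as strings. A quick behavioural sketch under those assumptions:

# Behavioural sketch matching the fallback logic of
# _get_max_job_retries above.
JOB_RETRIES = 60
RETRIES = 'storagetype:retries'

def get_max_job_retries(extraSpecs):
    if extraSpecs and RETRIES in extraSpecs:
        return int(extraSpecs[RETRIES])
    return JOB_RETRIES

assert get_max_job_retries(None) == 60
assert get_max_job_retries({RETRIES: '40'}) == 40
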
def _is_job_finished(self, conn, job):
"""Check if the job is finished.
"Defaulting to NONE.")
return 'NONE'
+ def parse_interval_from_file(self, fileName):
+ """Parse the interval from config file.
+
+ If it is not there, the default will be used.
+
+ :param fileName: the path and name of the file
+ :returns: interval - the interval in seconds
+ """
+ interval = self._parse_from_file(fileName, 'Interval')
+ if interval:
+ return interval
+ else:
+ LOG.debug("Interval not found in config file.")
+ return None
+
+ def parse_retries_from_file(self, fileName):
+ """Parse the retries from config file.
+
+ If it is not there, the default will be used.
+
+ :param fileName: the path and name of the file
+ :returns: retries - the max number of retries
+ """
+ retries = self._parse_from_file(fileName, 'Retries')
+ if retries:
+ return retries
+ else:
+ LOG.debug("Retries not found in config file.")
+ return None
+
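
Both parsers delegate to _parse_from_file, which is not part of this hunk. A plausible self-contained stand-in using ElementTree, with the tag names taken from the callers above; treat the helper's name and behaviour here as assumptions:

# Hypothetical stand-in for _parse_from_file (not shown in this
# hunk): read the text of one named tag from the driver's XML
# config file. Tag names 'Interval' and 'Retries' come from the
# callers above.
import xml.etree.ElementTree as ElementTree

def parse_from_file(fileName, tagName):
    root = ElementTree.parse(fileName).getroot()
    node = root.find(tagName)
    if node is not None and node.text:
        return node.text.strip()
    return None

# Against a config whose root holds <Interval>5</Interval> and
# <Retries>40</Retries>, this returns '5' and '40' respectively.
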
def parse_pool_instance_id(self, poolInstanceId):
"""Given the instance Id parse the pool name and system name from it.