Replace 123L with 123 to make Cinder code compatible with Python 3.
This patch was generated by the "long" operation of the sixer tool:
https://pypi.python.org/pypi/sixer
Manual changes:
* hp_3par_common: bump version
Blueprint cinder-python3
Change-Id: I3086d3e3d9c3ede358313bdd43d1eee9cfdafe15
# Simulate stat to return the mode of a block device
# make sure that st_mode (the first in the sequence)
# matches the mode of a block device
- return posix.stat_result((25008, 5753, 5L, 1, 0, 6, 0,
+ return posix.stat_result((25008, 5753, 5, 1, 0, 6, 0,
1375881199, 1375881197, 1375881197))
# Simulate stat to return the mode of a regular file
# make sure that st_mode (the first in the sequence)
# matches the mode of a regular file
- return posix.stat_result((33188, 5753, 5L, 1, 0, 6, 0,
+ return posix.stat_result((33188, 5753, 5, 1, 0, 6, 0,
1375881199, 1375881197, 1375881197))
# Simulate stat to return the mode of a directory
# make sure that st_mode (the first in the sequence)
# matches the mode of a directory
- return posix.stat_result((17407, 5753, 5L, 1, 0, 6, 0,
+ return posix.stat_result((17407, 5753, 5, 1, 0, 6, 0,
1375881199, 1375881197, 1375881197))
def fake_getSupportedReplicationTypes(self):
cimproperty = Fake_CIMProperty()
- cimproperty.value = [2L, 10L]
+ cimproperty.value = [2, 10]
return cimproperty
DeviceMaskingGroup=None, TargetMaskingGroup=None,
ProtocolController=None, StorageID=None, IDType=None):
- rc = 0L
+ rc = 0
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '9999'
if Size == -1073741824 and \
MethodName == 'CreateOrModifyCompositeElement':
- rc = 0L
+ rc = 0
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '99999'
if ElementName == 'failed_vol' and \
MethodName == 'CreateOrModifyElementFromStoragePool':
- rc = 10L
+ rc = 10
myjob['status'] = 'failure'
elif TheElements and \
TheElements[0]['DeviceID'] == '99999' and \
MethodName == 'EMCReturnToStoragePool':
- rc = 10L
+ rc = 10
myjob['status'] = 'failure'
elif HardwareId:
- rc = 0L
+ rc = 0
targetendpoints = {}
endpoints = []
endpoint = {}
return rc, targetendpoints
elif ReplicationType and \
MethodName == 'GetDefaultReplicationSettingData':
- rc = 0L
+ rc = 0
rsd = SE_ReplicationSettingData()
rsd['DefaultInstance'] = SE_ReplicationSettingData()
return rc, rsd
if MethodName == 'CreateStorageHardwareID':
ret = {}
- rc = 0L
+ rc = 0
ret['HardwareID'] = self.data.iscsi_initiator
return rc, ret
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common.provision.create_volume_from_pool = (
- mock.Mock(return_value=(volumeDict, 0L)))
+ mock.Mock(return_value=(volumeDict, 0)))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
self.driver.create_snapshot(self.data.test_volume)
self.data.test_volume['volume_name'] = "vmax-1234567"
e = exception.VolumeBackendAPIException('CreateElementReplica Ex')
common = self.driver.common
- common._delete_from_pool = mock.Mock(return_value=0L)
+ common._delete_from_pool = mock.Mock(return_value=0)
conn = self.fake_ecom_connection()
storageConfigService = (
common.utils.find_storage_configuration_service(
common = self.driver.common
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
- volume = {'size': 0L}
+ volume = {'size': 0}
common.provision.create_volume_from_pool = (
mock.Mock(return_value=(volumeDict, volume['size'])))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
common._create_composite_volume = (
- mock.Mock(return_value=(0L,
+ mock.Mock(return_value=(0,
volumeDict,
EMCVMAXCommonData.storage_system)))
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common.provision.create_volume_from_pool = (
- mock.Mock(return_value=(volumeDict, 0L)))
+ mock.Mock(return_value=(volumeDict, 0)))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
common.fast.is_volume_in_default_SG = (
mock_policy, mock_meta, mock_size, mock_pool):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common._modify_and_get_composite_volume_instance = (
- mock.Mock(return_value=(1L, None)))
+ mock.Mock(return_value=(1, None)))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common.provision.create_volume_from_pool = (
- mock.Mock(return_value=(volumeDict, 0L)))
+ mock.Mock(return_value=(volumeDict, 0)))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
common.fast.is_volume_in_default_SG = (
mock_meta, mock_size, mock_pool):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common._modify_and_get_composite_volume_instance = (
- mock.Mock(return_value=(1L, None)))
+ mock.Mock(return_value=(1, None)))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members,
mock_rg):
provisionv3 = self.driver.common.provisionv3
- provisionv3.create_group_replica = mock.Mock(return_value=(0L, None))
+ provisionv3.create_group_replica = mock.Mock(return_value=(0, None))
self.driver.create_cgsnapshot(
self.data.test_ctxt, self.data.test_CG_snapshot)
repServ = self.conn.EnumerateInstanceNames("EMC_ReplicationService")[0]
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common._create_v3_volume = (
- mock.Mock(return_value=(0L, volumeDict, self.data.storage_system)))
+ mock.Mock(return_value=(0, volumeDict, self.data.storage_system)))
conn = self.fake_ecom_connection()
storageConfigService = {}
storageConfigService['SystemName'] = EMCVMAXCommonData.storage_system
storageConfigService['CreationClassName'] = \
self.data.stconf_service_creationclass
- common._delete_from_pool_v3 = mock.Mock(return_value=0L)
+ common._delete_from_pool_v3 = mock.Mock(return_value=0)
mock_create_replica.side_effect = e
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol4',
- 'size': 2L,
+ 'size': 2,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
PolicyRule=tierPolicyRuleInstanceName,
Operation=self.utils.get_num(modificationType, '16'),
InElements=[storageGroupInstanceName])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error associating storage group : %(storageGroupName)s. "
"To fast Policy: %(fastPolicyName)s with error "
PolicyRule=tierPolicyRuleInstanceName,
Operation=self.utils.get_num(modificationType, '16'),
InElements=[storageGroupInstanceName])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
LOG.error(_LE("Error disassociating storage group from "
"policy: %s."), errordesc)
else:
DeviceMaskingGroup=deviceMaskingGroup,
TargetMaskingGroup=targetMaskingGroup)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Masking View: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
Type=self.utils.get_num(INITIATORGROUPTYPE, '16'),
Members=[hardwareIdinstanceNames[0]])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
MaskingGroup=foundInitiatorGroupInstanceName,
Members=[hardwareIdinstanceNames[j]])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = (
self.utils.wait_for_job_complete(conn, job,
extraSpecs))
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error adding initiator to group : %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
controllerConfigService,
ProtocolController=maskingViewInstanceName)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Modifying masking view : %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
MaskingGroup=storageGroupInstanceName,
Force=True)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Deleting Group: %(storageGroupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
'EMCReturnToStoragePool', storageConfigservice,
TheElements=theElements)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
{'volumename': volumeName,
'rc': rc})
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
Members=[volumeInstanceName])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
DeleteWhenBecomesUnassociated=False)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
controllerConfigService,
MaskingGroup=storageGroupInstanceName,
Members=[volumeInstanceName])
- if rc != 0L:
+ if rc != 0:
rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error removing volume %(vol)s. %(error)s.")
% {'vol': volumeName, 'error': errorDesc})
MaskingGroup=storageGroupInstanceName,
Members=[volumeInstanceName])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error mapping volume %(vol)s. %(error)s.")
% {'vol': volumeName, 'error': errordesc})
InPool=poolInstanceName,
TheElement=volumeInstanceName)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error unbinding volume %(vol)s from pool. %(error)s.")
% {'vol': volumeName, 'error': errordesc})
TheElement=theVolumeInstanceName,
InElements=[inVolumeInstanceName])
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error adding volume to composite volume. "
"Error is: %(error)s.")
CompositeType=self.utils.get_num(compositeType, '16'),
EMCNumberOfMembers=self.utils.get_num(numMembers, '32'))
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Volume: %(volumename)s. "
"Return code: %(rc)lu. Error: %(error)s.")
[compositeHeadInstanceName, compositeMemberInstanceName]),
CompositeType=self.utils.get_num(compositeType, '16'))
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Creating new composite Volume Return code: "
"%(rc)lu. Error: %(error)s.")
TheElements=[volumeInstanceName],
TargetPool=targetPoolInstanceName)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Migrating volume from one pool to another. "
"Return code: %(rc)lu. Error: %(error)s.")
rc, job = conn.InvokeMethod(
'RequestStateChange', volumeInstanceName,
RequestedState=self.utils.get_num(32769, '16'))
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Terminating migrate session. "
"Return code: %(rc)lu. Error: %(error)s.")
repServiceCapabilityInstanceName,
ReplicationType=self.utils.get_num(10, '16'))
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, rsd,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error creating cloned volume using "
"Volume: %(cloneName)s, Source Volume: "
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Cloned Volume: "
"Volume: %(cloneName)s Source Volume:"
{'syncName': syncInstanceName,
'rc': rc})
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error break clone relationship: "
"Sync Name: %(syncName)s "
'EMCGetTargetEndpoints', storageHardwareService,
HardwareId=hardwareId)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_("Error finding Target WWNs."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
replicationService,
GroupName=consistencyGroupName)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Failed to create consistency group: "
"%(consistencyGroupName)s "
ReplicationGroup=cgInstanceName,
RemoveElements=True)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Failed to delete consistency group: "
"%(consistencyGroupName)s "
Members=[volumeInstanceName],
ReplicationGroup=cgInstanceName)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Failed to add volume %(volumeName)s: "
"to consistency group %(cgName)s "
ReplicationGroup=cgInstanceName,
RemoveElements=True)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Failed to remove volume %(volumeName)s: "
"to consistency group %(cgName)s "
TargetGroup=tgtGroupInstanceName,
SyncType=self.utils.get_num(8, '16'))
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
"Return code: %(rc)lu. Error: %(error)s.")
'ReturnElementsToStoragePool', storageConfigservice,
TheElements=theElements)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
{'volumename': volumeName,
'rc': rc})
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error Create Cloned Volume: %(cloneName)s "
"Return code: %(rc)lu. Error: %(error)s.")
EMCSLO=slo,
EMCWorkload=workload)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
LOG.error(_LE(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s."),
ElementType=self.utils.get_num(3, '16'),
Goal=storagePoolSettingInstanceName)
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, supportedSizeDict, extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Cannot get supported size range for %(sps)s "
"Return code: %(rc)lu. Error: %(error)s.")
"operation: %(operation)s Return code: %(rc)lu.",
{'sv': syncInstanceName, 'operation': operation, 'rc': rc})
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMessage = (_(
"Error modify replica synchronization: %(sv)s "
"operation: %(operation)s. "
TargetGroup=tgtGroupInstanceName,
SyncType=self.utils.get_num(syncType, '16'))
- if rc != 0L:
+ if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
- if rc != 0L:
+ if rc != 0:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
"Return code: %(rc)lu. Error: %(error)s.")
LocalOnly=False)
jobstate = jobinstance['JobState']
# From ValueMap of JobState in CIM_ConcreteJob
- # 2L=New, 3L=Starting, 4L=Running, 32767L=Queue Pending
+ # 2=New, 3=Starting, 4=Running, 32767=Queue Pending
# ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767,
# 32768..65535"),
# Values("New, Starting, Running, Suspended, Shutting Down,
# Completed, Terminated, Killed, Exception, Service,
# Query Pending, DMTF Reserved, Vendor Reserved")]
- if jobstate in [2L, 3L, 4L, 32767L]:
+ if jobstate in [2, 3, 4, 32767]:
return False
else:
return True
2.0.42 - Fix type for snapshot config settings. bug #1461640
2.0.43 - Report the capability of supporting multiattach
2.0.44 - Update help strings to reduce the 3PAR user role requirements
+ 2.0.45 - Python 3 fixes
"""
- VERSION = "2.0.44"
+ VERSION = "2.0.45"
stats = {}
# because 3PAR volume sizes are in
# Mebibytes, Gigibytes, not Megabytes.
- MB = 1000L
+ MB = 1000
MiB = 1.048576
if int(vol_size) == 0: