'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
- 'consistencygroup_id': None
+ 'consistencygroup_id': None,
+ 'provider_location': 'system^FNM11111|type^lun|id^1',
}
test_volume_cg = {
'display_name': 'thin_vol',
'consistencygroup_id': None,
'display_description': 'vol with type',
- 'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231'}
+ 'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231',
+ 'provider_location': 'system^FNM11111|type^lun|id^1'}
test_failed_volume = {
'name': 'failed_vol1',
'display_name': 'clone1',
'consistencygroup_id': None,
'display_description': 'volume created from snapshot',
- 'volume_type_id': None}
+ 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
+ 'provider_location': 'system^fakesn|type^lun|id^2|version^05.03.00'}
test_clone_cg = {
'name': 'clone1',
'size': 1,
'display_name': 'clone1',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'volume created from snapshot',
- 'volume_type_id': None}
+ 'volume_type_id': None,
+ 'provider_location': 'system^fakesn|type^lun|id^2|version^05.03.00'}
connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'-allowReadWrite', 'yes',
'-allowAutoDelete', 'no')
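+ # Expected tokens for 'snap -modify'; auto-delete is re-allowed
+ # ('yes') whenever a snapshot's read/write access is changed.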
+ def SNAP_MODIFY_CMD(self, name, rw):
+ return ('snap', '-modify', '-id', name, '-allowReadWrite', rw,
+ '-allowAutoDelete', 'yes')
+
def SNAP_LIST_CMD(self, res_id=1):
cmd = ('snap', '-list', '-res', res_id)
return cmd
re.match(r".*Compression Enabler is not installed",
ex.msg))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'copytype:snap': 'true'}))
+ def test_create_volume_snapcopy_in_cg(self):
+ self.driverSetup()
+ vol = self.testData.test_volume_with_type.copy()
+ vol['consistencygroup_id'] = '7450764f-9d24-4c70-ad46-7cd90acd4292'
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.create_volume,
+ vol)
+
def test_get_volume_stats(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True)]
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
+ @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
+ "CommandLineHelper.create_lun_by_cmd",
+ mock.Mock(
+ return_value={'lun_id': 1}))
+ @mock.patch(
+ "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+ mock.Mock(
+ side_effect=[1, 1]))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:tiering': 'Auto',
+ 'copytype:snap': 'true'}))
+ def test_volume_migration_smp(self):
+
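+ # Migrating an SMP-backed volume converts it to a real LUN and
+ # removes its backing 'snap-as-vol-' snapshot.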
+ commands = [self.testData.MIGRATION_CMD(),
+ self.testData.MIGRATION_VERIFY_CMD(1)]
+ FAKE_MIGRATE_PROPERTY = """\
+Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
+Source LU ID: 63950
+Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
+Dest LU ID: 136
+Migration Rate: high
+Current State: MIGRATED
+Percent Complete: 100
+Time Remaining: 0 second(s)
+"""
+ results = [SUCCEED,
+ [(FAKE_MIGRATE_PROPERTY, 0),
+ ('The specified source LUN is not '
+ 'currently migrating', 23)]]
+ fake_cli = self.driverSetup(commands, results)
+ fake_host = {'capabilities': {'location_info':
+ "unit_test_pool2|fakeSerial",
+ 'storage_protocol': 'iSCSI'}}
+
+ vol = self.testData.test_volume.copy()
+ vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
+ tmp_snap = "snap-as-vol-%s" % vol['id']
+ ret = self.driver.migrate_volume(None,
+ vol,
+ fake_host)
+ self.assertTrue(ret[0])
+ self.assertTrue(
+ ret[1]['provider_location'].find('type^lun') > 0)
+ # verification
+ expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=False),
+ mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
+ poll=True)]
+ fake_cli.assert_has_calls(expect_cmd)
+
def test_create_destroy_volume_snapshot(self):
fake_cli = self.driverSetup()
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1'),
poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+ poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
- poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True,
poll=True),
ignore_thresholds=True))]
fake_cli2.assert_has_calls(expect_cmd2)
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'copytype:snap': 'true'}))
+ def test_create_volume_from_snapshot_smp(self):
+ fake_cli = self.driverSetup()
+ vol = self.driver.create_volume_from_snapshot(
+ self.testData.test_volume_with_type,
+ self.testData.test_snapshot)
+ self.assertTrue(
+ vol['provider_location'].find('type^smp') > 0)
+ expect_cmd = [
+ mock.call(
+ *self.testData.SNAP_COPY_CMD(
+ src_snap='snapshot1',
+ snap_name='snap-as-vol-%s' % '1')),
+ mock.call(
+ *self.testData.SNAP_MODIFY_CMD(
+ name='snap-as-vol-%s' % '1',
+ rw='yes')),
+ mock.call(
+ *self.testData.SNAP_MP_CREATE_CMD(
+ name='vol_with_type', source='vol1'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ poll=True),
+ mock.call(
+ *self.testData.SNAP_ATTACH_CMD(
+ name='vol_with_type', snapName='snap-as-vol-%s' % '1'))]
+ fake_cli.assert_has_calls(expect_cmd)
+
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_create_volume_from_snapshot_sync_failed(self):
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1'),
poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+ poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
- poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True,
poll=True),
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1'), poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'), poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'), poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
poll=True,
retry_disable=True),
fake_cli.assert_has_calls(expect_cmd)
def test_create_cloned_volume(self):
- cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
- cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
- output_dest = self.testData.LUN_PROPERTY("vol1_dest")
+ cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("clone1_dest")
+ cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("clone1_dest")
+ output_dest = self.testData.LUN_PROPERTY("clone1_dest")
+ cmd_clone = self.testData.LUN_PROPERTY_ALL_CMD("clone1")
+ output_clone = self.testData.LUN_PROPERTY("clone1")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
- commands = [cmd_dest, cmd_dest_p, cmd_migrate,
+ commands = [cmd_dest, cmd_dest_p, cmd_clone, cmd_migrate,
cmd_migrate_verify]
- results = [output_dest, output_dest, output_migrate,
+ results = [output_dest, output_dest, output_clone, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
- self.driver.create_cloned_volume(self.testData.test_volume,
- self.testData.test_snapshot)
+ volume = self.testData.test_volume.copy()
+ volume['name'] = 'clone1'
+
+ self.driver.create_cloned_volume(volume, self.testData.test_volume)
tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
expect_cmd = [
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
- poll=True),
mock.call(
*self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
mock.call(*self.testData.SNAP_MP_CREATE_CMD(
- name='vol1',
- source='snapshot1'), poll=False),
+ name='clone1',
+ source='vol1'), poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1'),
+ poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
- name='vol1', snapName=tmp_snap)),
+ name='clone1', snapName=tmp_snap)),
mock.call(*self.testData.LUN_CREATION_CMD(
- 'vol1_dest', 1, 'unit_test_pool', None, None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+ 'clone1_dest', 1, 'unit_test_pool', None, None)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
- poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
poll=True,
retry_disable=True),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'copytype:snap': 'true'}))
+ def test_create_cloned_volume_smp(self):
+ fake_cli = self.driverSetup()
+ vol = self.driver.create_cloned_volume(
+ self.testData.test_clone,
+ self.testData.test_volume_with_type)
+ self.assertTrue(
+ vol['provider_location'].find('type^smp') > 0)
+ expect_cmd = [
+ mock.call(
+ *self.testData.SNAP_CREATE_CMD(
+ name='snap-as-vol-%s' % '2'),
+ poll=False),
+ mock.call(
+ *self.testData.SNAP_MP_CREATE_CMD(
+ name='clone1', source='vol_with_type'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1'),
+ poll=True),
+ mock.call(
+ *self.testData.SNAP_ATTACH_CMD(
+ name='clone1', snapName='snap-as-vol-%s' % '2'))]
+ fake_cli.assert_has_calls(expect_cmd)
+
def test_delete_volume_failed(self):
commands = [self.testData.LUN_DELETE_CMD('failed_vol1')]
results = [FAKE_ERROR_RETURN]
mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg'))]
fake_cli.assert_has_calls(expected)
+ def test_delete_volume_smp(self):
+ fake_cli = self.driverSetup()
+ vol = self.testData.test_volume_with_type.copy()
+ vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
+ tmp_snap = 'snap-as-vol-%s' % vol['id']
+ self.driver.delete_volume(vol)
+ expected = [mock.call(*self.testData.LUN_DELETE_CMD(vol['name'])),
+ mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
+ poll=True)]
+ fake_cli.assert_has_calls(expected)
+
def test_extend_volume(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
results = [self.testData.LUN_PROPERTY('vol1', size=2)]
diff_data,
host_test_data)
+ @mock.patch(
+ "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
+ "migrate_lun_with_verification",
+ mock.Mock(return_value=True))
+ @mock.patch(
+ "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
+ "create_lun_with_advance_feature",
+ mock.Mock(return_value={'lun_id': '1'}))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'thin',
+ 'copytype:snap': 'true'}))
+ def test_retype_copytype_snap_true_to_false(self):
+ diff_data = {'encryption': {}, 'qos_specs': {},
+ 'extra_specs':
+ {'copytype:snap': ('true',
+ 'false')}}
+
+ new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+ 'deleted': False,
+ 'extra_specs': {'storagetype:provisioning': 'thin',
+ 'copytype:snap': 'false'},
+ 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+ host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+ 'capabilities':
+ {'location_info': 'unit_test_pool|FNM00124500890',
+ 'volume_backend_name': 'pool_backend_1',
+ 'storage_protocol': 'iSCSI'}}
+
+ cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+ output_migrate_verify = (r'The specified source LUN '
+ 'is not currently migrating', 23)
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.SNAP_LIST_CMD(),
+ cmd_migrate_verify]
+ results = [self.testData.NDU_LIST_RESULT,
+ ('No snap', 1023),
+ output_migrate_verify]
+ fake_cli = self.driverSetup(commands, results)
+ self.driver.cli.enablers = ['-Compression',
+ '-Deduplication',
+ '-ThinProvisioning',
+ '-FAST']
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
+ return_value={'array_serial': "FNM00124500890"})
+
+ vol = self.testData.test_volume3.copy()
+ vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
+ tmp_snap = 'snap-as-vol-%s' % vol['id']
+ ret = self.driver.retype(None, vol,
+ new_type_data,
+ diff_data,
+ host_test_data)
+ self.assertIsInstance(ret, tuple)
+ self.assertTrue(ret[0])
+ self.assertTrue(
+ ret[1]['provider_location'].find('type^lun') > 0)
+ expect_cmd = [
+ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
+ mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
+ poll=True)]
+ fake_cli.assert_has_calls(expect_cmd)
+
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
source='clone1'),
poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'), poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol1', snapName=tmp_cgsnapshot)),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'), poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True,
poll=True),
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1'),
poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+ poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='cgsnapshot_id')),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
- poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True,
poll=True),
def test_create_consistencygroup_from_cgsnapshot(self):
output_migrate_verify = ('The specified source LUN '
- 'is not currently migrating', 23)
+ 'is not currently migrating.', 23)
new_cg = self.testData.test_cg.copy()
new_cg.update(
{'id': 'new_cg_id'})
mock.call(*td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
snap1_in_src_cgsnap['volume_name']),
poll=False),
+ mock.call(*td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
+ poll=True),
mock.call(*td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
copied_snap_name)),
mock.call(*td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
vol1_in_new_cg['name'] + '_dest'), poll=False),
mock.call(*td.LUN_PROPERTY_ALL_CMD(
vol1_in_new_cg['name'] + '_dest'), poll=False),
- mock.call(*td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
- poll=True),
mock.call(*td.MIGRATION_CMD(6231, 1),
poll=True, retry_disable=True),
mock.call(*td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
snap2_in_src_cgsnap['volume_name']),
poll=False),
+ mock.call(*td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
+ poll=True),
mock.call(*td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
copied_snap_name)),
mock.call(*td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
vol2_in_new_cg['name'] + '_dest'), poll=False),
mock.call(*td.LUN_PROPERTY_ALL_CMD(
vol2_in_new_cg['name'] + '_dest'), poll=False),
- mock.call(*td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
- poll=True),
mock.call(*td.MIGRATION_CMD(6232, 2),
poll=True, retry_disable=True),
mock.call(*td.MIGRATION_VERIFY_CMD(6232), poll=True),
vol1_in_new_cg['name'] + '_dest')),
mock.call('lun', '-detach', '-name', vol1_in_new_cg['name'], '-o'),
mock.call(*self.testData.LUN_DELETE_CMD(vol1_in_new_cg['name'])),
- mock.call(*td.DELETE_CG_SNAPSHOT(copied_snap_name))]
+ mock.call(*td.SNAP_DELETE_CMD(copied_snap_name), poll=True)]
fake_cli.assert_has_calls(expect_cmd)
def test_deregister_initiator(self):
self.assertTrue(has_error)
def test_has_error_not_exist(self):
- output = "The specified snapshot does not exist"
+ output = "The specified snapshot does not exist."
has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
self.assertTrue(has_error)
- output = "The (pool lun) may not exist"
+ output = "The (pool lun) may not exist."
has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
self.assertTrue(has_error)
def test_has_error_multi_line(self):
output = """Could not retrieve the specified (pool lun).
- The (pool lun) may not exist"""
+ The (pool lun) may not exist."""
has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
self.assertTrue(has_error)
def test_has_error_regular_string_false(self):
- output = "Cannot unbind LUN because it's contained in a Storage Group"
+ output = "Cannot unbind LUN because it's contained in a Storage Group."
has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
self.assertFalse(has_error)
def test_has_error_multi_errors(self):
- output = "Cannot unbind LUN because it's contained in a Storage Group"
+ output = "Cannot unbind LUN because it's contained in a Storage Group."
has_error = VNXError.has_error(output,
VNXError.LUN_IN_SG,
VNXError.GENERAL_NOT_FOUND)
self.assertTrue(has_error)
- output = "Cannot unbind LUN because it's contained in a Storage Group"
+ output = "Cannot unbind LUN because it's contained in a Storage Group."
has_error = VNXError.has_error(output,
VNXError.LUN_ALREADY_EXPANDED,
VNXError.LUN_NOT_MIGRATING)
LUN_IN_SG = 'contained in a Storage Group|LUN mapping still exists'
LUN_NOT_MIGRATING = ('The specified source LUN is '
'not currently migrating')
+ LUN_IS_NOT_SMP = 'it is not a snapshot mount point'
CG_IS_DELETING = 0x712d8801
CG_EXISTED = 0x716d8021
self.provisioning_specs = [
'provisioning:type',
'storagetype:provisioning']
+ self.copytype_spec = 'copytype:snap'
self.provisioning_values = {
'thin': ['-type', 'Thin'],
'thick': ['-type', 'NonThin'],
'compressed': ['-type', 'Thin'],
'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
self.tiering_values = {
+ 'none': None,
'starthighthenauto': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'autoTier'],
if provisioning:
command_create_lun.extend(self.provisioning_values[provisioning])
# tiering
- if tiering:
+ if tiering != 'none':
command_create_lun.extend(self.tiering_values[tiering])
if ignore_thresholds:
command_create_lun.append('-ignoreThresholds')
command_modify_lun = ['lun', '-modify',
'-name', name,
'-o']
- if tiering:
+ if tiering != 'none':
command_modify_lun.extend(self.tiering_values[tiering])
out, rc = self.command_execute(*command_modify_lun)
msg = _('Failed to create snapshot as no LUN ID is specified')
raise exception.VolumeBackendAPIException(data=msg)
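+ # 'snap -copy' duplicates an existing snapshot; a name-already-exists
+ # error is tolerated so the copy is effectively idempotent.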
+ def copy_snapshot(self, src_snap_name, new_name):
+ command_copy_snapshot = ('snap', '-copy',
+ '-id', src_snap_name,
+ '-name', new_name,
+ '-ignoreMigrationCheck',
+ '-ignoreDeduplicationCheck')
+
+ out, rc = self.command_execute(*command_copy_snapshot)
+ if rc != 0:
+ # Ignore the error if the snap already exists
+ if VNXError.has_error(out, VNXError.SNAP_NAME_EXISTED):
+ LOG.warning(_LW('Snapshot %(name)s already exists. '
+ 'Message: %(msg)s'),
+ {'name': new_name, 'msg': out})
+ else:
+ self._raise_cli_error(command_copy_snapshot, rc, out)
+
def delete_snapshot(self, name):
def delete_snapshot_success():
return rc
- def copy_snapshot(self, src_snap_name, new_name):
-
- copy_snap_cmd = ('snap', '-copy',
- '-id', src_snap_name,
- '-name', new_name,
- '-ignoreMigrationCheck',
- '-ignoreDeduplicationCheck')
-
- out, rc = self.command_execute(*copy_snap_cmd)
- if rc != 0:
- self._raise_cli_error(copy_snap_cmd, rc, out)
-
def allow_snapshot_readwrite_and_autodelete(self, snap_name):
modify_cmd = ('snap', '-modify', '-id', snap_name,
'thin_provisioning_support': False,
'thick_provisioning_support': True}
enablers = []
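+ # Prefixes for driver-managed snapshots: 'tmp-snap-' backs a clone
+ # during migration, 'snap-as-vol-' backs an SMP-based volume
+ # (copytype:snap=true), and 'tmp-cgsnapshot-' is the temporary
+ # cgsnapshot used when cloning a volume in a consistency group.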
+ tmp_snap_prefix = 'tmp-snap-'
+ snap_as_vol_prefix = 'snap-as-vol-'
+ tmp_cgsnap_prefix = 'tmp-cgsnapshot-'
def __init__(self, prtcl, configuration=None):
self.protocol = prtcl
return self.array_serial['array_serial']
def _construct_store_spec(self, volume, snapshot):
- if snapshot['cgsnapshot_id']:
- snapshot_name = snapshot['cgsnapshot_id']
- else:
- snapshot_name = snapshot['name']
- source_volume_name = snapshot['volume_name']
- volume_name = volume['name']
- volume_size = snapshot['volume_size']
- dest_volume_name = volume_name + '_dest'
+ if snapshot['cgsnapshot_id']:
+ snapshot_name = snapshot['cgsnapshot_id']
+ else:
+ snapshot_name = snapshot['name']
+ source_volume_name = snapshot['volume_name']
+ volume_name = volume['name']
+ volume_size = snapshot['volume_size']
+ dest_volume_name = volume_name + '_dest'
+ snap_name = snapshot_name
+ pool_name = self.get_target_storagepool(volume, snapshot['volume'])
+ specs = self.get_volumetype_extraspecs(volume)
+ provisioning, tiering, snapcopy = self._get_extra_spec_value(specs)
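+ # For copytype:snap=true the SMP attaches a dedicated
+ # 'snap-as-vol-<volume id>' copy of the snapshot instead of the
+ # original snapshot.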
+ if snapcopy == 'true':
+ snap_name = self._construct_snap_as_vol_name(volume)
+ store_spec = {
+ 'source_vol_name': source_volume_name,
+ 'volume': volume,
+ 'src_snap_name': snapshot_name,
+ 'snap_name': snap_name,
+ 'dest_vol_name': dest_volume_name,
+ 'pool_name': pool_name,
+ 'provisioning': provisioning,
+ 'tiering': tiering,
+ 'snapcopy': snapcopy,
+ 'volume_size': volume_size,
+ 'client': self._client,
+ 'ignore_pool_full_threshold': self.ignore_pool_full_threshold
+ }
+ return store_spec
- pool_name = self.get_target_storagepool(volume, snapshot['volume'])
- specs = self.get_volumetype_extraspecs(volume)
- provisioning, tiering = self._get_extra_spec_value(specs)
- store_spec = {
- 'source_vol_name': source_volume_name,
- 'volume': volume,
- 'snap_name': snapshot_name,
- 'dest_vol_name': dest_volume_name,
- 'pool_name': pool_name,
- 'provisioning': provisioning,
- 'tiering': tiering,
- 'volume_size': volume_size,
- 'client': self._client,
- 'ignore_pool_full_threshold': self.ignore_pool_full_threshold
- }
- return store_spec
+ def _construct_snap_as_vol_name(self, volume):
+ return self.snap_as_vol_prefix + volume['id']
+
+ def _construct_tmp_snap_name(self, volume):
+ return self.tmp_snap_prefix + volume['id']
def create_volume(self, volume):
"""Creates a EMC volume."""
volume_name = volume['name']
self._volume_creation_check(volume)
+ volume_metadata = self._get_volume_metadata(volume)
# defining CLI command
specs = self.get_volumetype_extraspecs(volume)
pool = self.get_target_storagepool(volume)
- provisioning, tiering = self._get_extra_spec_value(specs)
+ provisioning, tiering, snapcopy = self._get_extra_spec_value(specs)
- if not provisioning:
- provisioning = 'thick'
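+ # Reject copytype:snap=true volumes up front: a VNX consistency
+ # group cannot enable copytype:snap=True on its members.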
+ if snapcopy == 'true' and volume['consistencygroup_id']:
+ msg = _("Volume with copytype:snap=True can not be put in "
+ "consistency group.")
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
'pool: %(pool)s '
'provisioning: %(provisioning)s '
- 'tiering: %(tiering)s.'),
+ 'tiering: %(tiering)s '
+ 'snapcopy: %(snapcopy)s.'),
{'volume': volume_name,
'size': volume_size,
'pool': pool,
'provisioning': provisioning,
- 'tiering': tiering})
+ 'tiering': tiering,
+ 'snapcopy': snapcopy})
data = self._client.create_lun_with_advance_feature(
pool, volume_name, volume_size,
provisioning, tiering, volume['consistencygroup_id'],
ignore_thresholds=self.ignore_pool_full_threshold,
poll=False)
- model_update = {'provider_location':
- self._build_provider_location_for_lun(data['lun_id'])}
+ pl = self._build_provider_location_for_lun(data['lun_id'])
+ volume_metadata['lun_type'] = 'lun'
+ model_update = {'provider_location': pl,
+ 'metadata': volume_metadata}
return model_update
"since driver version 5.1.0. This key will be "
"ignored."))
- provisioning, tiering = self._get_extra_spec_value(specs)
+ provisioning, tiering, snapcopy = self._get_extra_spec_value(specs)
# step 1: check extra spec value
- if provisioning:
- self._check_extra_spec_value(
- provisioning,
- self._client.provisioning_values.keys())
- if tiering:
- self._check_extra_spec_value(
- tiering,
- self._client.tiering_values.keys())
-
- # step 2: check extra spec combination
- self._check_extra_spec_combination(provisioning, tiering)
- return provisioning, tiering
+ self._check_extra_spec_value(
+ provisioning,
+ self._client.provisioning_values.keys())
+ self._check_extra_spec_value(
+ tiering,
+ self._client.tiering_values.keys())
+ self._check_extra_spec_value(
+ snapcopy, ['true', 'false'])
+ self._check_extra_spec_combination([provisioning, tiering, snapcopy])
+ return provisioning, tiering, snapcopy
def _check_extra_spec_value(self, extra_spec, valid_values):
"""Checks whether an extra spec's value is valid."""
def _get_extra_spec_value(self, extra_specs):
"""Gets EMC extra spec values."""
provisioning = 'thick'
- tiering = None
-
if self._client.provisioning_specs[0] in extra_specs:
provisioning = (
extra_specs[self._client.provisioning_specs[0]].lower())
"be deprecated in the next release. It is "
"recommended to use extra spec key "
"'provisioning:type' instead."))
- if self._client.tiering_spec in extra_specs:
- tiering = extra_specs[self._client.tiering_spec].lower()
+ tiering = extra_specs.get(
+ self._client.tiering_spec, 'none').lower()
+ snapcopy = extra_specs.get(
+ self._client.copytype_spec, 'false').lower()
- return provisioning, tiering
+ return provisioning, tiering, snapcopy
- def _check_extra_spec_combination(self, provisioning, tiering):
+ def _check_extra_spec_combination(self, spec_values):
"""Checks whether extra spec combination is valid."""
enablers = self.enablers
- # check provisioning and tiering
+ # check provisioning, tiering, snapcopy
# deduplicated and tiering can not be both enabled
- if provisioning == 'deduplicated' and tiering is not None:
+ provisioning, tiering, snapcopy = spec_values
+ if provisioning == 'deduplicated' and tiering != 'none':
msg = _("deduplicated and auto tiering can't be both enabled.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
"Can not create thin volume")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
- elif tiering is not None and '-FAST' not in enablers:
+ elif tiering != 'none' and '-FAST' not in enablers:
msg = _("FAST VP Enabler is not installed. "
"Can't set tiering policy for the volume")
LOG.error(msg)
with excutils.save_and_reraise_exception():
# Reraise the original exception
pass
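+ # An SMP-backed volume keeps a 'snap-as-vol-' snapshot; delete it
+ # together with the volume.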
+ if volume['provider_location']:
+ lun_type = self._extract_provider_location_for_lun(
+ volume['provider_location'], 'type')
+ if lun_type == 'smp':
+ self._client.delete_snapshot(
+ self._construct_snap_as_vol_name(volume))
def extend_volume(self, volume, new_size):
"""Extends an EMC volume."""
src_id = self.get_lun_id(volume)
provisioning = 'thick'
- tiering = None
+ # Tiering values are plain strings, so the default changes from
+ # None to the string 'none'.
+ tiering = 'none'
if new_type:
- provisioning, tiering = self._get_extra_spec_value(
+ provisioning, tiering, snapcopy = self._get_extra_spec_value(
new_type['extra_specs'])
else:
- provisioning, tiering = self._get_extra_spec_value(
+ provisioning, tiering, snapcopy = self._get_extra_spec_value(
self.get_volumetype_extraspecs(volume))
data = self._client.create_lun_with_advance_feature(
moved = self._client.migrate_lun_with_verification(
src_id, dst_id, new_volume_name)
- return moved, {}
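+ # A successful migration turns an SMP into a real LUN: drop the
+ # backing snapshot and rewrite provider_location to type^lun.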
+ lun_type = self._extract_provider_location_for_lun(
+ volume['provider_location'], 'type')
+ if lun_type == 'smp':
+ self._client.delete_snapshot(
+ self._construct_snap_as_vol_name(volume))
+
+ pl = self._build_provider_location_for_lun(src_id, 'lun')
+ volume_metadata = self._get_volume_metadata(volume)
+ volume_metadata['lun_type'] = 'lun'
+ model_update = {'provider_location': pl,
+ 'metadata': volume_metadata}
+ return moved, model_update
+
+ def update_migrated_volume(self, context, volume, new_volume):
+ lun_type = self._extract_provider_location_for_lun(
+ new_volume['provider_location'], 'type')
+ volume_metadata = self._get_volume_metadata(volume)
+ if lun_type:
+ volume_metadata['lun_type'] = lun_type
+ model_update = {'metadata': volume_metadata}
+ return model_update
def retype(self, ctxt, volume, new_type, diff, host):
new_specs = new_type['extra_specs']
- new_provisioning, new_tiering = (
+ new_provisioning, new_tiering, snapcopy = (
self._get_and_validate_extra_specs(new_specs))
# Check what changes are needed
self._is_valid_for_storage_assisted_migration(
volume, host, new_type))
if is_valid:
- if self._migrate_volume(
- volume, target_pool_name, new_type)[0]:
- return True
+ moved, model_update = self._migrate_volume(
+ volume, target_pool_name, new_type)
+ if moved:
+ return moved, model_update
else:
LOG.warning(_LW('Storage-assisted migration failed during '
'retype.'))
tiering_change = False
old_specs = self.get_volumetype_extraspecs(volume)
- old_provisioning, old_tiering = self._get_extra_spec_value(
- old_specs)
+ old_provisioning, old_tiering, old_snapcopy = (
+ self._get_extra_spec_value(old_specs))
new_specs = new_type['extra_specs']
- new_provisioning, new_tiering = self._get_extra_spec_value(
- new_specs)
+ new_provisioning, new_tiering, new_snapcopy = (
+ self._get_extra_spec_value(new_specs))
+
+ lun_type = self._extract_provider_location_for_lun(
+ volume['provider_location'], 'type')
if volume['host'] != host['host'] or \
old_provisioning != new_provisioning:
migration = True
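+ # An SMP (copytype:snap=true) volume is converted to a real LUN
+ # via migration when retyped.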
+ if lun_type == 'smp':
+ migration = True
if new_tiering != old_tiering:
tiering_change = True
snapshot_name = snapshot['name']
volume_name = snapshot['volume_name']
volume = snapshot['volume']
+ lun_type = self._extract_provider_location_for_lun(
+ volume['provider_location'], 'type')
+ if lun_type == 'smp':
+ msg = (_('Failed to create snapshot of %s because it is a '
+ 'snapshot mount point.')
+ % volume_name)
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'),
{'snapshot': snapshot_name,
'volume': volume_name})
1. Create a snap mount point (SMP) for the snapshot.
2. Attach the snapshot to the SMP created in the first step.
3. Create a temporary lun to prepare for migration.
+ (Skipped if copytype:snap='true')
4. Start a migration between the SMP and the temp lun.
+ (Skipped if copytype:snap='true')
"""
self._volume_creation_check(volume)
flow_name = 'create_volume_from_snapshot'
work_flow = linear_flow.Flow(flow_name)
store_spec = self._construct_store_spec(volume, snapshot)
- work_flow.add(CreateSMPTask(),
- AttachSnapTask(),
- CreateDestLunTask(),
- MigrateLunTask())
- flow_engine = taskflow.engines.load(work_flow,
- store=store_spec)
- flow_engine.run()
- new_lun_id = flow_engine.storage.fetch('new_lun_id')
- model_update = {'provider_location':
- self._build_provider_location_for_lun(new_lun_id)}
+ volume_metadata = self._get_volume_metadata(volume)
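+ # copytype:snap=false: build a real LUN by attaching the snapshot
+ # to an SMP and migrating it to a destination LUN.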
+ if store_spec['snapcopy'] == 'false':
+ work_flow.add(CreateSMPTask(),
+ AttachSnapTask(),
+ CreateDestLunTask(),
+ MigrateLunTask())
+ flow_engine = taskflow.engines.load(work_flow,
+ store=store_spec)
+ flow_engine.run()
+ new_lun_id = flow_engine.storage.fetch('new_lun_id')
+ pl = self._build_provider_location_for_lun(new_lun_id, 'lun')
+ volume_metadata['lun_type'] = 'lun'
+ else:
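+ # copytype:snap=true: the volume stays an SMP attached to a
+ # writable snapshot copy; no destination LUN or migration.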
+ work_flow.add(CopySnapshotTask(),
+ AllowReadWriteOnSnapshotTask(),
+ CreateSMPTask(),
+ AttachSnapTask())
+ flow_engine = taskflow.engines.load(work_flow,
+ store=store_spec)
+ flow_engine.run()
+ new_lun_id = flow_engine.storage.fetch('new_smp_id')
+ pl = self._build_provider_location_for_lun(new_lun_id, 'smp')
+ volume_metadata['lun_type'] = 'smp'
+ model_update = {'provider_location': pl,
+ 'metadata': volume_metadata}
volume_host = volume['host']
host = vol_utils.extract_host(volume_host, 'backend')
host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
+ lun_type = self._extract_provider_location_for_lun(
+ src_vref['provider_location'], 'type')
+ if lun_type == 'smp':
+ msg = (_('Failed to clone %s because it is a '
+ 'snapshot mount point.')
+ % src_vref['name'])
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
self._volume_creation_check(volume)
source_volume_name = src_vref['name']
source_lun_id = self.get_lun_id(src_vref)
volume_size = src_vref['size']
consistencygroup_id = src_vref['consistencygroup_id']
- snapshot_name = 'tmp-snap-%s' % volume['id']
+ snapshot_name = self._construct_tmp_snap_name(volume)
tmp_cgsnapshot_name = None
if consistencygroup_id:
- tmp_cgsnapshot_name = 'tmp-cgsnapshot-%s' % volume['id']
+ tmp_cgsnapshot_name = self.tmp_cgsnap_prefix + volume['id']
snapshot = {
'name': snapshot_name,
'consistencygroup_id': consistencygroup_id,
'id': tmp_cgsnapshot_name
}
- store_spec = self._construct_store_spec(volume, snapshot)
flow_name = 'create_cloned_volume'
+ store_spec = self._construct_store_spec(volume, snapshot)
+ volume_metadata = self._get_volume_metadata(volume)
work_flow = linear_flow.Flow(flow_name)
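+ # For snapcopy clones the snapshot is created under the persistent
+ # 'snap-as-vol-' name and kept as the volume's backing snapshot.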
+ if store_spec['snapcopy'] == 'true':
+ snapshot['name'] = self._construct_snap_as_vol_name(volume)
store_spec.update({'snapshot': snapshot})
store_spec.update({'source_lun_id': source_lun_id})
- work_flow.add(CreateSnapshotTask(),
- CreateSMPTask(),
- AttachSnapTask(),
- CreateDestLunTask(),
- MigrateLunTask())
- flow_engine = taskflow.engines.load(work_flow,
- store=store_spec)
- flow_engine.run()
- new_lun_id = flow_engine.storage.fetch('new_lun_id')
- # Delete temp Snapshot
- if consistencygroup_id:
- self._client.delete_cgsnapshot(snapshot['id'])
+ if store_spec['snapcopy'] == 'false':
+ work_flow.add(CreateSnapshotTask(),
+ CreateSMPTask(),
+ AttachSnapTask(),
+ CreateDestLunTask(),
+ MigrateLunTask())
+ flow_engine = taskflow.engines.load(work_flow,
+ store=store_spec)
+ flow_engine.run()
+ new_lun_id = flow_engine.storage.fetch('new_lun_id')
+ # Delete temp Snapshot
+ if consistencygroup_id:
+ self._client.delete_cgsnapshot(snapshot['id'])
+ else:
+ self.delete_snapshot(snapshot)
+ pl = self._build_provider_location_for_lun(new_lun_id, 'lun')
+ volume_metadata['lun_type'] = 'lun'
else:
- self.delete_snapshot(snapshot)
-
- model_update = {'provider_location':
- self._build_provider_location_for_lun(new_lun_id)}
+ work_flow.add(CreateSnapshotTask(),
+ CreateSMPTask(),
+ AttachSnapTask())
+ flow_engine = taskflow.engines.load(work_flow,
+ store=store_spec)
+ flow_engine.run()
+ new_lun_id = flow_engine.storage.fetch('new_smp_id')
+ pl = self._build_provider_location_for_lun(new_lun_id, 'smp')
+ volume_metadata['lun_type'] = 'smp'
+
+ model_update = {'provider_location': pl,
+ 'metadata': volume_metadata}
volume_host = volume['host']
host = vol_utils.extract_host(volume_host, 'backend')
host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
return model_update
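+ # Flatten the volume's list-style volume_metadata into a plain
+ # dict for use in model updates.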
+ def _get_volume_metadata(self, volume):
+ volume_metadata = {}
+ if 'volume_metadata' in volume:
+ for metadata in volume['volume_metadata']:
+ volume_metadata[metadata['key']] = metadata['value']
+ return volume_metadata
+
def dumps_provider_location(self, pl_dict):
return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
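+ # provider_location is 'key^value' pairs joined by '|', e.g.
+ # 'system^FNM11111|type^lun|id^1|version^05.03.00'; 'type' is 'lun'
+ # for a real LUN and 'smp' for a snapshot mount point.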
- def _build_provider_location_for_lun(self, lun_id):
+ def _build_provider_location_for_lun(self, lun_id, type='lun'):
pl_dict = {'system': self.get_array_serial(),
- 'type': 'lun',
+ 'type': type,
'id': six.text_type(lun_id),
'version': self.VERSION}
return self.dumps_provider_location(pl_dict)
if group.get('volume_type_id') is not None:
for id in group['volume_type_id'].split(","):
if id:
- provisioning, tiering = self._get_extra_spec_value(
- volume_types.get_volume_type_extra_specs(id))
+ provisioning, tiering, snapcopy = (
+ self._get_extra_spec_value(
+ volume_types.get_volume_type_extra_specs(id)))
if provisioning == 'compressed':
msg = _("Failed to create consistency group %s "
"because VNX consistency group cannot "
"accept compressed LUNs as members."
) % group['id']
raise exception.VolumeBackendAPIException(data=msg)
+ if snapcopy == 'true':
+ msg = _("Failed to create consistency group %s "
+ "because VNX consistency group cannot "
+ "enable copytype:snap=True on its members."
+ ) % group['id']
+ raise exception.VolumeBackendAPIException(data=msg)
def create_consistencygroup(self, context, group):
"""Creates a consistency group."""
for i, (volume, snap) in enumerate(zip(volumes, snapshots)):
specs = self.get_volumetype_extraspecs(volume)
- provisioning, tiering = self._get_and_validate_extra_specs(specs)
+ provisioning, tiering, snapcopy = (
+ self._get_and_validate_extra_specs(specs))
pool_name = self.get_target_storagepool(volume, snap['volume'])
sub_store_spec = {
'volume': volume,
'volume_size': volume['size'],
'provisioning': provisioning,
'tiering': tiering,
- 'ignore_pool_full_threshold': self.ignore_pool_full_threshold
+ 'ignore_pool_full_threshold': self.ignore_pool_full_threshold,
+ 'snapcopy': snapcopy
}
work_flow.add(
CreateSMPTask(name="CreateSMPTask%s" % i,
Reversion strategy: Delete the SMP.
"""
+ def __init__(self, name=None, inject=None):
+ super(CreateSMPTask, self).__init__(name=name,
+ provides='new_smp_id',
+ inject=inject)
+
def execute(self, client, volume, source_vol_name, *args, **kwargs):
LOG.debug('CreateSMPTask.execute')
client.create_mount_point(source_vol_name, volume['name'])
+ return client.get_lun_by_name(volume['name'])['lun_id']
def revert(self, result, client, volume, *args, **kwargs):
LOG.debug('CreateSMPTask.revert')
Reversion strategy: Detach the SMP.
"""
- def execute(self, client, volume, snap_name, *args, **kwargs):
+ def execute(self, client, volume, snapcopy, snap_name,
+ *args, **kwargs):
LOG.debug('AttachSnapTask.execute')
client.attach_mount_point(volume['name'], snap_name)
with excutils.save_and_reraise_exception() as ctxt:
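+ # Treat 'not a snapshot mount point' (rc 163) as benign; any
+ # other error is re-raised.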
is_not_smp_err = (
ex.kwargs["rc"] == 163 and
- client.CLI_RESP_PATTERM_IS_NOT_SMP in
- "".join(ex.kwargs["out"]))
+ VNXError.has_error("".join(ex.kwargs["out"]),
+ VNXError.LUN_IS_NOT_SMP))
ctxt.reraise = not is_not_smp_err
rebind=rebind)
self.wait_for_completion = wait_for_completion
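+ # The migration source id is injected as CreateSMPTask's
+ # 'new_smp_id' result rather than looked up again by LUN name.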
- def execute(self, client, dest_vol_name, volume, lun_data,
- *args, **kwargs):
+ def execute(self, client, new_smp_id, lun_data, *args, **kwargs):
LOG.debug('MigrateLunTask.execute')
- new_vol_name = volume['name']
- new_vol_lun_id = client.get_lun_by_name(new_vol_name)['lun_id']
dest_vol_lun_id = lun_data['lun_id']
- LOG.info(_LI('Migrating Mount Point Volume: %s'), new_vol_name)
+ LOG.debug('Migrating Mount Point Volume ID: %s', new_smp_id)
if self.wait_for_completion:
- migrated = client.migrate_lun_with_verification(new_vol_lun_id,
+ migrated = client.migrate_lun_with_verification(new_smp_id,
dest_vol_lun_id,
None)
else:
migrated = client.migrate_lun_without_verification(
- new_vol_lun_id, dest_vol_lun_id, None)
+ new_smp_id, dest_vol_lun_id, None)
if not migrated:
msg = (_("Migrate volume failed between source vol %(src)s"
" and dest vol %(dst)s.") %
- {'src': new_vol_name, 'dst': dest_vol_name})
+ {'src': new_smp_id, 'dst': dest_vol_lun_id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
- return new_vol_lun_id
+ return new_smp_id
def revert(self, *args, **kwargs):
pass
'%(source_name)s.'),
{'new_name': snap_name,
'source_name': src_snap_name})
- client.delete_cgsnapshot(snap_name)
+ client.delete_snapshot(snap_name)
class AllowReadWriteOnSnapshotTask(task.Task):