-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
SUCCEED = ("", 0)
FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
+VERSION = emc_vnx_cli.EMCVnxCliBase.VERSION
class EMCVNXCLIDriverTestData():
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
- 'provider_location': 'system^FNM11111|type^lun|lun_id^1',
+ 'provider_location': 'system^FNM11111|type^lun|id^1|version^05.02.00',
+ 'display_name': 'vol1',
+ 'display_description': 'test volume',
+ 'volume_type_id': None,
+ 'consistencygroup_id': None,
+ 'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]
+ }
+
+ test_legacy_volume = {
+ 'name': 'vol1',
+ 'size': 1,
+ 'volume_name': 'vol1',
+ 'id': '1',
+ 'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
+ 'project_id': 'project',
+ 'provider_location': 'system^FNM11111|type^lun|id^1',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'},
- {'key': 'readonly', 'value': 'False'}]
+ {'key': 'readonly', 'value': 'False'}],
+ 'provider_location': 'system^FNM11111|type^lun|id^1|version^05.02.00',
}
test_volume2 = {
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol2',
'consistencygroup_id': None,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol2',
'consistencygroup_id': None,
'volume_name': 'vol_with_type',
'id': '1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'thin_vol',
'consistencygroup_id': None,
'volume_name': 'failed_vol1',
'id': '4',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'consistencygroup_id': None,
'volume_name': 'vol1_in_sg',
'id': '4',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'Volume 1 in SG',
'volume_type_id': None,
- 'provider_location': 'system^fakesn|type^lun|id^4'}
+ 'provider_location': 'system^fakesn|type^lun|id^4|version^05.02.00'}
test_volume2_in_sg = {
'name': 'vol2_in_sg',
'volume_name': 'vol2_in_sg',
'id': '5',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'Volume 2 in SG',
'volume_type_id': None,
- 'provider_location': 'system^fakesn|type^lun|id^3'}
+ 'provider_location': 'system^fakesn|type^lun|id^3|version^05.02.00'}
test_snapshot = {
'name': 'snapshot1',
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': None,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
+ 'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': 'consistencygroup_id',
'wwpns': ["1234567890123456", "1234567890543216"],
'wwnns': ["2234567890123456", "2234567890543216"],
'host': 'fakehost'}
- test_volume3 = {'migration_status': None, 'availability_zone': 'nova',
- 'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
- 'name': 'vol3',
- 'size': 2,
- 'volume_admin_metadata': [],
- 'status': 'available',
- 'volume_type_id':
- '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
- 'deleted': False, 'provider_location': None,
- 'host': 'ubuntu-server12@pool_backend_1',
- 'source_volid': None, 'provider_auth': None,
- 'display_name': 'vol-test02', 'instance_uuid': None,
- 'attach_status': 'detached',
- 'volume_type': [],
- 'attached_host': None,
- 'provider_location': 'system^FNM11111|type^lun|lun_id^1',
- '_name_id': None, 'volume_metadata': []}
+ test_volume3 = {
+ 'migration_status': None, 'availability_zone': 'nova',
+ 'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
+ 'name': 'vol3',
+ 'size': 2,
+ 'volume_admin_metadata': [],
+ 'status': 'available',
+ 'volume_type_id':
+ '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
+ 'deleted': False,
+ 'host': "host@backendsec#unit_test_pool",
+ 'source_volid': None, 'provider_auth': None,
+ 'display_name': 'vol-test02', 'instance_uuid': None,
+ 'attach_status': 'detached',
+ 'volume_type': [],
+ 'attached_host': None,
+ 'provider_location':
+ 'system^FNM11111|type^lun|id^1|version^05.02.00',
+ '_name_id': None, 'volume_metadata': []}
test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs':
{'storagetype:provisioning': ('thick', 'thin')}}
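+    # Host entries use the pool-aware 'host@backend#pool' format.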
- test_host = {'host': 'ubuntu-server12@pool_backend_1',
+ test_host = {'host': 'ubuntu-server12@pool_backend_1#POOL_SAS1',
'capabilities':
- {'location_info': 'POOL_SAS1|FNM00124500890',
+ {'pool_name': 'POOL_SAS1',
+ 'location_info': 'POOL_SAS1|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
- test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
- 'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
- 'name': 'vol4',
- 'size': 2L,
- 'volume_admin_metadata': [],
- 'status': 'available',
- 'volume_type_id':
- '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
- 'deleted': False, 'provider_location': None,
- 'host': 'ubuntu-server12@array_backend_1',
- 'source_volid': None, 'provider_auth': None,
- 'display_name': 'vol-test02', 'instance_uuid': None,
- 'attach_status': 'detached',
- 'volume_type': [],
- '_name_id': None, 'volume_metadata': []}
-
test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location':
- 'system^FNM11111|type^lun|lun_id^5',
- 'host': 'ubuntu-server12@array_backend_1',
+ 'system^FNM11111|type^lun|id^5|version^05.02.00',
+ 'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test05', 'instance_uuid': None,
'attach_status': 'detached',
'name': 'snapshot1',
'size': 1,
'id': 'cgsnapshot_id',
+ 'volume': test_volume,
'volume_name': 'vol1',
'volume_size': 1,
'consistencygroup_id': 'consistencygroup_id',
POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
'-userCap', '-availableCap')
+ POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
+ 'unit_test_pool', '-availableCap',
+ '-userCap', '-fastcache')
+
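+    # Fixtures emulating 'storagepool -list' output for two pools (the
+    # second pool name intentionally contains spaces).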
+ def POOL_GET_ALL_CMD(self, withfastcache=False):
+ if withfastcache:
+ return ('storagepool', '-list', '-availableCap',
+ '-userCap', '-fastcache')
+ else:
+ return ('storagepool', '-list', '-availableCap',
+ '-userCap')
+
+ def POOL_GET_ALL_RESULT(self, withfastcache=False):
+ if withfastcache:
+ return ("Pool Name: unit_test_pool1\n"
+ "Pool ID: 0\n"
+ "User Capacity (Blocks): 6881061888\n"
+ "User Capacity (GBs): 3281.146\n"
+ "Available Capacity (Blocks): 6512292864\n"
+ "Available Capacity (GBs): 3105.303\n"
+ "FAST Cache: Enabled\n"
+ "\n"
+ "Pool Name: unit test pool 2\n"
+ "Pool ID: 1\n"
+ "User Capacity (Blocks): 8598306816\n"
+ "User Capacity (GBs): 4099.992\n"
+ "Available Capacity (Blocks): 8356663296\n"
+ "Available Capacity (GBs): 3984.768\n"
+ "FAST Cache: Disabled\n", 0)
+ else:
+ return ("Pool Name: unit_test_pool1\n"
+ "Pool ID: 0\n"
+ "User Capacity (Blocks): 6881061888\n"
+ "User Capacity (GBs): 3281.146\n"
+ "Available Capacity (Blocks): 6512292864\n"
+ "Available Capacity (GBs): 3105.303\n"
+ "\n"
+ "Pool Name: unit test pool 2\n"
+ "Pool ID: 1\n"
+ "User Capacity (Blocks): 8598306816\n"
+ "User Capacity (GBs): 4099.992\n"
+ "Available Capacity (Blocks): 8356663296\n"
+ "Available Capacity (GBs): 3984.768\n", 0)
+
NDU_LIST_CMD = ('ndu', '-list')
NDU_LIST_RESULT = ("Name of the software package: -Compression " +
"Name of the software package: -Deduplication " +
"Name of the software package: -FAST " +
"Name of the software package: -FASTCache " +
- "Name of the software package: -ThinProvisioning ",
+ "Name of the software package: -ThinProvisioning "
+ "Name of the software package: -VNXSnapshots",
0)
+ NDU_LIST_RESULT_WO_LICENSE = (
+ "Name of the software package: -Unisphere ",
+ 0)
+
def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'):
return ('lun', '-create', '-type', 'snap', '-primaryLunName',
source, '-name', name)
'-allowReadWrite', 'yes',
'-allowAutoDelete', 'no')
- def SNAP_LIST_CMD(self, res_id=1, poll=True):
+ def SNAP_LIST_CMD(self, res_id=1):
cmd = ('snap', '-list', '-res', res_id)
- if not poll:
- cmd = ('-np',) + cmd
return cmd
def LUN_DELETE_CMD(self, name):
return ('lun', '-expand', '-name', name, '-capacity', newsize,
'-sq', 'gb', '-o', '-ignoreThresholds')
+ def LUN_PROPERTY_POOL_CMD(self, lunname):
+ return ('lun', '-list', '-name', lunname, '-poolName')
+
def LUN_PROPERTY_ALL_CMD(self, lunname):
return ('lun', '-list', '-name', lunname,
'-state', '-status', '-opDetails', '-userCap', '-owner',
""", 0)
+ POOL_PROPERTY_W_FASTCACHE = (
+ "Pool Name: unit_test_pool\n"
+ "Pool ID: 1\n"
+ "User Capacity (Blocks): 6881061888\n"
+ "User Capacity (GBs): 3281.146\n"
+ "Available Capacity (Blocks): 6832207872\n"
+ "Available Capacity (GBs): 3257.851\n"
+ "FAST Cache: Enabled\n\n", 0)
+
ALL_PORTS = ("SP: A\n" +
"Port ID: 4\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
0)
-class EMCVNXCLIDriverISCSITestCase(test.TestCase):
-
+class DriverTestCaseBase(test.TestCase):
def setUp(self):
- super(EMCVNXCLIDriverISCSITestCase, self).setUp()
+ super(DriverTestCaseBase, self).setUp()
self.stubs.Set(CommandLineHelper, 'command_execute',
- self.fake_setup_command_execute)
+ self.fake_command_execute_for_driver_setup)
self.stubs.Set(CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
'fakeSerial'}))
self.configuration.default_timeout = 0.0002
self.configuration.initiator_auto_registration = True
self.configuration.check_max_pool_luns_threshold = False
- self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
+ self.stubs.Set(self.configuration, 'safe_get',
+ self.fake_safe_get({'storage_vnx_pool_name':
+ 'unit_test_pool',
+ 'volume_backend_name':
+ 'namedbackend'}))
self.testData = EMCVNXCLIDriverTestData()
self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
def driverSetup(self, commands=tuple(), results=tuple()):
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ self.driver = self.generateDriver(self.configuration)
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.Mock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
return fake_cli
+ def generateDriver(self, conf):
+ raise NotImplementedError
+
def get_command_execute_simulator(self, commands=tuple(),
results=tuple()):
assert(len(commands) == len(results))
return standard_default
+ def fake_command_execute_for_driver_setup(self, *command, **kwargv):
+ if command == ('connection', '-getport', '-address', '-vlanid'):
+ return self.testData.ALL_PORTS
+ else:
+ return SUCCEED
+
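+    # Builds a stand-in for Configuration.safe_get that serves values from
+    # the supplied dict, so each test case controls its own pool/backend
+    # settings.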
+ def fake_safe_get(self, values):
+ def _safe_get(key):
+ return values.get(key)
+ return _safe_get
+
+
+class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
+ def generateDriver(self, conf):
+ return EMCCLIISCSIDriver(configuration=conf)
+
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
re.match(r".*Compression Enabler is not installed",
ex.msg))
- @mock.patch(
- "eventlet.event.Event.wait",
- mock.Mock(return_value=None))
- @mock.patch(
- "cinder.volume.volume_types."
- "get_volume_type_extra_specs",
- mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
- 'storagetype:pool': 'unit_test_pool'}))
- def test_create_compression_volume_on_array_backend(self):
- """Unit test for create a compression volume on array
- backend.
- """
- #Set up the array backend
- config = conf.Configuration(None)
- config.append_config_values = mock.Mock(return_value=0)
- config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
- config.san_ip = '10.0.0.1'
- config.san_login = 'sysadmin'
- config.san_password = 'sysadmin'
- config.default_timeout = 0.0002
- config.initiator_auto_registration = True
- config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
- '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
- config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
- self.driver = EMCCLIISCSIDriver(configuration=config)
- assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
-
- commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
- self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
- self.testData.NDU_LIST_CMD]
- results = [self.testData.LUN_PROPERTY('vol_with_type', True),
- self.testData.LUN_PROPERTY('vol_with_type', True),
- self.testData.NDU_LIST_RESULT]
- fake_command_execute = self.get_command_execute_simulator(
- commands, results)
- fake_cli = mock.MagicMock(side_effect=fake_command_execute)
- self.driver.cli._client.command_execute = fake_cli
-
- self.driver.cli.stats['compression_support'] = 'True'
- self.driver.cli.enablers = ['-Compression',
- '-Deduplication',
- '-ThinProvisioning',
- '-FAST']
- #case
- self.driver.create_volume(self.testData.test_volume_with_type)
- #verification
- expect_cmd = [
- mock.call(*self.testData.LUN_CREATION_CMD(
- 'vol_with_type', 1,
- 'unit_test_pool',
- 'compressed', None, False)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type'), poll=False),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type'), poll=True),
- mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
- 1))]
- fake_cli.assert_has_calls(expect_cmd)
-
def test_get_volume_stats(self):
- self.driverSetup()
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_PROPERTY_W_FASTCACHE_CMD]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_PROPERTY_W_FASTCACHE]
+ self.driverSetup(commands, results)
stats = self.driver.get_volume_stats(True)
- self.assertTrue(stats['driver_version'] is not None,
- "driver_version is not returned")
- self.assertTrue(
- stats['free_capacity_gb'] == 3257.851,
- "free_capacity_gb is not correct")
- self.assertTrue(
- stats['reserved_percentage'] == 3,
- "reserved_percentage is not correct")
+
+ self.assertTrue(stats['driver_version'] == VERSION,
+ "driver_version is incorrect")
self.assertTrue(
stats['storage_protocol'] == 'iSCSI',
- "storage_protocol is not correct")
- self.assertTrue(
- stats['total_capacity_gb'] == 3281.146,
- "total_capacity_gb is not correct")
+ "storage_protocol is incorrect")
self.assertTrue(
stats['vendor_name'] == "EMC",
- "vender name is not correct")
+ "vendor name is incorrect")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
- "volume backend name is not correct")
- self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
- self.assertTrue(
- stats['driver_version'] == "05.01.00",
- "driver version is incorrect.")
+ "volume backend name is incorrect")
+
+ pool_stats = stats['pools'][0]
+
+ expected_pool_stats = {
+ 'free_capacity_gb': 3257.851,
+ 'reserved_percentage': 3,
+ 'location_info': 'unit_test_pool|fakeSerial',
+ 'total_capacity_gb': 3281.146,
+ 'compression_support': 'True',
+ 'deduplication_support': 'True',
+ 'thinprovisioning_support': 'True',
+ 'consistencygroup_support': 'True',
+ 'pool_name': 'unit_test_pool',
+ 'fast_cache_enabled': 'True',
+ 'fast_support': 'True'}
+
+ self.assertEqual(expected_pool_stats, pool_stats)
def test_get_volume_stats_too_many_luns(self):
- commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
- results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
+ self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_PROPERTY_W_FASTCACHE,
+ self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.check_max_pool_luns_threshold = True
stats = self.driver.get_volume_stats(True)
+ pool_stats = stats['pools'][0]
self.assertTrue(
- stats['free_capacity_gb'] == 0,
- "free_capacity_gb is not correct")
- expect_cmd = [
- mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
- poll=False)]
- fake_cli.assert_has_calls(expect_cmd)
+ pool_stats['free_capacity_gb'] == 0,
+ "free_capacity_gb is incorrect")
expect_cmd = [
mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
poll=False)]
self.driver.cli.check_max_pool_luns_threshold = False
stats = self.driver.get_volume_stats(True)
+ pool_stats = stats['pools'][0]
self.assertTrue(stats['driver_version'] is not None,
"driver_version is not returned")
self.assertTrue(
- stats['free_capacity_gb'] == 3257.851,
- "free_capacity_gb is not correct")
+ pool_stats['free_capacity_gb'] == 3257.851,
+ "free_capacity_gb is incorrect")
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
self.testData.test_volume,
self.testData.connector)
- self.assertEqual(connection_info,
- self.testData.iscsi_connection_info_ro)
+ self.assertEqual(self.testData.iscsi_connection_info_ro,
+ connection_info)
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
poll=False),
self.testData.PING_OK
]
fake_cli = self.driverSetup(commands, results)
- test_volume_rw = self.testData.test_volume_rw.copy()
- test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
+ test_volume_rw = self.testData.test_volume_rw
connection_info = self.driver.initialize_connection(
test_volume_rw,
self.testData.connector)
- self.assertEqual(connection_info,
- self.testData.iscsi_connection_info_rw)
+ self.assertEqual(self.testData.iscsi_connection_info_rw,
+ connection_info)
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
poll=False),
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [cmd_dest, cmd_dest_p, cmd_migrate,
- cmd_migrate_verify,
- self.testData.NDU_LIST_CMD]
+ cmd_migrate_verify]
results = [output_dest, output_dest, output_migrate,
- output_migrate_verify,
- self.testData.NDU_LIST_RESULT]
+ output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.create_cloned_volume(self.testData.test_volume,
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
self.assertTrue(
- re.match(r'.*not in a manageable pool backend by cinder',
+ re.match(r'.*not managed by the host',
ex.msg))
expected = [mock.call(*get_lun_cmd, poll=True)]
fake_cli.assert_has_calls(expected)
- def test_manage_existing_get_size_pool_backend(self):
+ def test_manage_existing_get_size(self):
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-userCap', '-owner',
'-attachedSnapshot', '-poolName')
self.testData.test_volume_with_type,
invaild_ref)
- def test_manage_existing_get_size_array_backend(self):
- get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
- '-state', '-status', '-opDetails', '-userCap', '-owner',
- '-attachedSnapshot',)
- test_size = 2
- commands = [get_lun_cmd]
- results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
-
- self.configuration.safe_get = mock.Mock(return_value=None)
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
- assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
-
- # Mock the command executor
- fake_command_execute = self.get_command_execute_simulator(
- commands, results)
- fake_cli = mock.MagicMock(side_effect=fake_command_execute)
- self.driver.cli._client.command_execute = fake_cli
-
- get_size = self.driver.manage_existing_get_size(
- self.testData.test_volume_with_type,
- self.testData.test_existing_ref)
- expected = [mock.call(*get_lun_cmd, poll=True)]
- assert get_size == test_size
- fake_cli.assert_has_calls(expected)
- self.configuration.safe_get = self.fake_safe_get
-
- def test_manage_existing_with_array_backend(self):
- """Unit test for the manage_existing with the
- array backend which is not support the manage
- existing functinality.
- """
- #Set up the array backend
- config = conf.Configuration(None)
- config.append_config_values = mock.Mock(return_value=0)
- config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
- config.san_ip = '10.0.0.1'
- config.san_login = 'sysadmin'
- config.san_password = 'sysadmin'
- config.default_timeout = 0.0002
- config.initiator_auto_registration = True
- config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
- '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
- config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
- self.driver = EMCCLIISCSIDriver(configuration=config)
- assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
- #mock the command executor
- lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
- '-newName', 'vol_with_type', '-o')
- commands = [lun_rename_cmd]
- results = [SUCCEED]
- #mock the command executor
- fake_command_execute = self.get_command_execute_simulator(
- commands, results)
- fake_cli = mock.MagicMock(side_effect=fake_command_execute)
- self.driver.cli._client.command_execute = fake_cli
- self.driver.manage_existing(
- self.testData.test_volume_with_type,
- self.testData.test_existing_ref)
- expected = [mock.call(*lun_rename_cmd, poll=False)]
- fake_cli.assert_has_calls(expected)
-
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
'unit_test_pool2'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
- host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+ host_test_data = {'host':
+ 'ubuntu-server12@pool_backend_1#unit_test_pool2',
'capabilities':
{'location_info': 'unit_test_pool2|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
- host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
- 'capabilities':
- {'location_info': 'unit_test_pool|FNM00124500890',
- 'volume_backend_name': 'pool_backend_1',
- 'storage_protocol': 'iSCSI'}}
+ host_test_data = {
+ 'host': 'host@backendsec#unit_test_pool',
+ 'capabilities': {
+ 'location_info': 'unit_test_pool|FNM00124500890',
+ 'volume_backend_name': 'pool_backend_1',
+ 'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- self.testData.SNAP_LIST_CMD(poll=False)]
+ self.testData.SNAP_LIST_CMD()]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
- host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
- 'capabilities':
- {'location_info': 'unit_test_pool|FNM00124500891',
- 'volume_backend_name': 'pool_backend_2',
- 'storage_protocol': 'iSCSI'}}
+ host_test_data = {
+ 'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
+ 'capabilities':
+ {'location_info': 'unit_test_pool|FNM00124500891',
+ 'volume_backend_name': 'pool_backend_2',
+ 'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- self.testData.SNAP_LIST_CMD(poll=False)]
+ self.testData.SNAP_LIST_CMD()]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
self.driverSetup(commands, results)
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
- host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
- 'capabilities':
- {'location_info': 'unit_test_pool|FNM00124500890',
- 'volume_backend_name': 'pool_backend_2',
- 'storage_protocol': 'FC'}}
+ host_test_data = {
+ 'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
+ 'capabilities':
+ {'location_info': 'unit_test_pool|FNM00124500890',
+ 'volume_backend_name': 'pool_backend_2',
+ 'storage_protocol': 'FC'}}
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD(),
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
- host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
- 'capabilities':
- {'location_info': 'unit_test_pool|FNM00124500890',
- 'volume_backend_name': 'pool_backend_1',
- 'storage_protocol': 'iSCSI'}}
+ host_test_data = {
+ 'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
+ 'capabilities':
+ {'location_info': 'unit_test_pool|FNM00124500890',
+ 'volume_backend_name': 'pool_backend_1',
+ 'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- self.testData.SNAP_LIST_CMD(poll=False)]
+ self.testData.SNAP_LIST_CMD()]
results = [self.testData.NDU_LIST_RESULT,
('Has snap', 0)]
self.driverSetup(commands, results)
'thin'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
- host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
- 'capabilities':
- {'location_info': 'unit_test_pool|FNM00124500890',
- 'volume_backend_name': 'pool_backend_1',
- 'storage_protocol': 'iSCSI'}}
+ host_test_data = {
+ 'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
+ 'capabilities':
+ {'location_info': 'unit_test_pool|FNM00124500890',
+ 'volume_backend_name': 'pool_backend_1',
+ 'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD]
results = [self.testData.NDU_LIST_RESULT]
mock.Mock(return_value={'fast_cache_enabled': 'True'}))
def test_create_volume_with_fastcache(self):
'''enable fastcache when creating volume.'''
- commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
- self.testData.NDU_LIST_CMD,
- self.testData.CHECK_FASTCACHE_CMD(
- self.testData.test_pool_name)]
- results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ ]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_PROPERTY_W_FASTCACHE,
self.testData.LUN_PROPERTY('vol_with_type', True),
- SUCCEED,
- ('FAST Cache: Enabled', 0)]
+ ]
fake_cli = self.driverSetup(commands, results)
lun_info = {'lun_name': "vol_with_type",
cli_helper.command_execute = fake_cli
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
- cli_helper.get_pool = mock.Mock(return_value={'lun_nums': 1000,
- 'total_capacity_gb': 10,
- 'free_capacity_gb': 5})
+ cli_helper.get_pool = mock.Mock(return_value={
+ 'lun_nums': 1000,
+ 'total_capacity_gb': 10,
+ 'free_capacity_gb': 5,
+ 'pool_name': "unit_test_pool",
+ 'fast_cache_enabled': 'True'})
+
self.driver.update_volume_stats()
self.driver.create_volume(self.testData.test_volume_with_type)
- self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True')
+ pool_stats = self.driver.cli.stats['pools'][0]
+ self.assertEqual('True', pool_stats['fast_cache_enabled'])
expect_cmd = [
mock.call('connection', '-getport', '-address', '-vlanid',
poll=False),
- mock.call('storagepool', '-list', '-name',
- 'Pool_02_SASFLASH', '-fastcache', poll=False),
mock.call('-np', 'lun', '-create', '-capacity',
1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH',
'-name', 'vol_with_type', '-type', 'NonThin')
'volume_name': 'vol_01',
'id': '1',
'name_id': '1',
- 'provider_location': 'system^FNM11111|type^lun|lun_id^1',
+ 'provider_location': 'system^FNM11111|type^lun|id^4',
'project_id': 'project',
'display_name': 'vol_01',
'display_description': 'test volume',
'volume_type_id': None,
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
- self.assertEqual(self.driver.cli.get_lun_id(volume_01), 1)
+ self.assertEqual(4, self.driver.cli.get_lun_id(volume_01))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
'display_description': 'test volume',
'volume_type_id': None,
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
- self.assertEqual(self.driver.cli.get_lun_id(volume_02), 2)
+ self.assertEqual(2, self.driver.cli.get_lun_id(volume_02))
def test_create_consistency_group(self):
cg_name = self.testData.test_cg['id']
'-o')]
fake_cli.assert_has_calls(expect_cmd)
- def succeed_fake_command_execute(self, *command, **kwargv):
- return SUCCEED
-
- def fake_setup_command_execute(self, *command, **kwargv):
- return self.testData.ALL_PORTS
-
- def fake_get_pool_properties(self, filter_option, properties=None):
- pool_info = {'pool_name': "unit_test_pool0",
- 'total_capacity_gb': 1000.0,
- 'free_capacity_gb': 1000.0
- }
- return pool_info
-
- def fake_get_lun_properties(self, filter_option, properties=None):
- lun_info = {'lun_name': "vol1",
- 'lun_id': 1,
- 'pool': "unit_test_pool",
- 'attached_snapshot': "N/A",
- 'owner': "A",
- 'total_capacity_gb': 1.0,
- 'state': "Ready"}
- return lun_info
- def fake_safe_get(self, value):
- if value == "storage_vnx_pool_name":
- return "unit_test_pool"
- elif 'volume_backend_name' == value:
- return "namedbackend"
- else:
- return None
+class EMCVNXCLIArrayBasedDriverTestCase(DriverTestCaseBase):
+    def setUp(self):
+        super(EMCVNXCLIArrayBasedDriverTestCase, self).setUp()
+ self.configuration.safe_get = self.fake_safe_get(
+ {'storage_vnx_pool_name': None,
+ 'volume_backend_name': 'namedbackend'})
+ def generateDriver(self, conf):
+ driver = EMCCLIISCSIDriver(configuration=conf)
+ self.assertTrue(isinstance(driver.cli,
+ emc_vnx_cli.EMCVnxCliArray))
+ return driver
-class EMCVNXCLIDriverFCTestCase(test.TestCase):
+ def test_get_volume_stats(self):
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_GET_ALL_CMD(True)]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_GET_ALL_RESULT(True)]
+ self.driverSetup(commands, results)
+ stats = self.driver.get_volume_stats(True)
- def setUp(self):
- super(EMCVNXCLIDriverFCTestCase, self).setUp()
+ self.assertTrue(stats['driver_version'] == VERSION,
+ "driver_version is incorrect")
+        self.assertTrue(
+            stats['storage_protocol'] == 'iSCSI',
+            "storage_protocol is incorrect")
+        self.assertTrue(
+            stats['vendor_name'] == "EMC",
+            "vendor name is incorrect")
+        self.assertTrue(
+            stats['volume_backend_name'] == "namedbackend",
+            "volume backend name is incorrect")
- self.stubs.Set(CommandLineHelper, 'command_execute',
- self.fake_setup_command_execute)
- self.stubs.Set(CommandLineHelper, 'get_array_serial',
- mock.Mock(return_value={'array_serial':
- "fakeSerial"}))
- self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
+ self.assertEqual(2, len(stats['pools']))
+ pool_stats1 = stats['pools'][0]
+ expected_pool_stats1 = {
+ 'free_capacity_gb': 3105.303,
+ 'reserved_percentage': 2,
+ 'location_info': 'unit_test_pool1|fakeSerial',
+ 'total_capacity_gb': 3281.146,
+ 'compression_support': 'True',
+ 'deduplication_support': 'True',
+ 'thinprovisioning_support': 'True',
+ 'consistencygroup_support': 'True',
+ 'pool_name': 'unit_test_pool1',
+ 'fast_cache_enabled': 'True',
+ 'fast_support': 'True'}
+ self.assertEqual(expected_pool_stats1, pool_stats1)
+
+ pool_stats2 = stats['pools'][1]
+ expected_pool_stats2 = {
+ 'free_capacity_gb': 3984.768,
+ 'reserved_percentage': 2,
+ 'location_info': 'unit test pool 2|fakeSerial',
+ 'total_capacity_gb': 4099.992,
+ 'compression_support': 'True',
+ 'deduplication_support': 'True',
+ 'thinprovisioning_support': 'True',
+ 'consistencygroup_support': 'True',
+ 'pool_name': 'unit test pool 2',
+ 'fast_cache_enabled': 'False',
+ 'fast_support': 'True'}
+ self.assertEqual(expected_pool_stats2, pool_stats2)
+
+ def test_get_volume_stats_wo_fastcache(self):
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_GET_ALL_CMD(False)]
+ results = [self.testData.NDU_LIST_RESULT_WO_LICENSE,
+ self.testData.POOL_GET_ALL_RESULT(False)]
+ self.driverSetup(commands, results)
- self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
- self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
- self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01)
+ stats = self.driver.get_volume_stats(True)
- self.configuration = conf.Configuration(None)
- self.configuration.append_config_values = mock.Mock(return_value=0)
- self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
- self.configuration.san_ip = '10.0.0.1'
- self.configuration.storage_vnx_pool_name = 'unit_test_pool'
- self.configuration.san_login = 'sysadmin'
- self.configuration.san_password = 'sysadmin'
- #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
- self.configuration.default_timeout = 0.0002
- self.configuration.initiator_auto_registration = True
- self.configuration.check_max_pool_luns_threshold = False
- self.configuration.zoning_mode = None
- self.configuration.max_luns_per_storage_pool = 4000
- self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
- self.testData = EMCVNXCLIDriverTestData()
- self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
- '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
+ self.assertEqual(2, len(stats['pools']))
+ pool_stats1 = stats['pools'][0]
+ expected_pool_stats1 = {
+ 'free_capacity_gb': 3105.303,
+ 'reserved_percentage': 2,
+ 'location_info': 'unit_test_pool1|fakeSerial',
+ 'total_capacity_gb': 3281.146,
+ 'compression_support': 'False',
+ 'deduplication_support': 'False',
+ 'thinprovisioning_support': 'False',
+ 'consistencygroup_support': 'False',
+ 'pool_name': 'unit_test_pool1',
+ 'fast_cache_enabled': 'False',
+ 'fast_support': 'False'}
+ self.assertEqual(expected_pool_stats1, pool_stats1)
+
+ pool_stats2 = stats['pools'][1]
+ expected_pool_stats2 = {
+ 'free_capacity_gb': 3984.768,
+ 'reserved_percentage': 2,
+ 'location_info': 'unit test pool 2|fakeSerial',
+ 'total_capacity_gb': 4099.992,
+ 'compression_support': 'False',
+ 'deduplication_support': 'False',
+ 'thinprovisioning_support': 'False',
+ 'consistencygroup_support': 'False',
+ 'pool_name': 'unit test pool 2',
+ 'fast_cache_enabled': 'False',
+ 'fast_support': 'False'}
+ self.assertEqual(expected_pool_stats2, pool_stats2)
- def driverSetup(self, commands=tuple(), results=tuple()):
- self.driver = EMCCLIFCDriver(configuration=self.configuration)
- fake_command_execute = self.get_command_execute_simulator(
- commands, results)
- fake_cli = mock.Mock(side_effect=fake_command_execute)
- self.driver.cli._client.command_execute = fake_cli
- return fake_cli
+ @mock.patch(
+ "eventlet.event.Event.wait",
+ mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
+ def test_create_volume_deduplicated(self):
+ commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type')]
+ results = [self.testData.LUN_PROPERTY('vol_with_type', True)]
- def get_command_execute_simulator(self, commands=tuple(),
- results=tuple()):
+ fake_cli = self.driverSetup(commands, results)
+ self.driver.cli.enablers = ['-Compression',
+ '-Deduplication',
+ '-ThinProvisioning',
+ '-FAST']
+ # Case
+ self.driver.create_volume(self.testData.test_volume_with_type)
- assert(len(commands) == len(results))
+ # Verification
+ expect_cmd = [
+ mock.call(*self.testData.LUN_CREATION_CMD(
+ 'vol_with_type', 1,
+ 'unit_test_pool',
+ 'deduplicated', None, False)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ poll=False)]
+ fake_cli.assert_has_calls(expect_cmd)
- def fake_command_execute(*args, **kwargv):
- for i in range(len(commands)):
- if args == commands[i]:
- if isinstance(results[i], list):
- if len(results[i]) > 0:
- ret = results[i][0]
- del results[i][0]
- return ret
- else:
- return results[i]
- return self.standard_fake_command_execute(*args, **kwargv)
- return fake_command_execute
+ def test_get_pool(self):
+ testVolume = self.testData.test_volume_with_type
+ commands = [self.testData.LUN_PROPERTY_POOL_CMD(testVolume['name'])]
+ results = [self.testData.LUN_PROPERTY(testVolume['name'], False)]
+ fake_cli = self.driverSetup(commands, results)
+ pool = self.driver.get_pool(testVolume)
+ self.assertEqual('Pool_02_SASFLASH', pool)
+ fake_cli.assert_has_calls(
+ [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
+ testVolume['name']), poll=False)])
+
+    def test_get_target_pool_for_cloned_volume(self):
+ testSrcVolume = self.testData.test_volume
+ testNewVolume = self.testData.test_volume2
+ fake_cli = self.driverSetup()
+ pool = self.driver.cli.get_target_storagepool(testNewVolume,
+ testSrcVolume)
+ self.assertEqual('unit_test_pool', pool)
+ self.assertFalse(fake_cli.called)
+
+    def test_get_target_pool_for_clone_legacy_volume(self):
+ testSrcVolume = self.testData.test_legacy_volume
+ testNewVolume = self.testData.test_volume2
+ commands = [self.testData.LUN_PROPERTY_POOL_CMD(testSrcVolume['name'])]
+ results = [self.testData.LUN_PROPERTY(testSrcVolume['name'], False)]
+ fake_cli = self.driverSetup(commands, results)
+ pool = self.driver.cli.get_target_storagepool(testNewVolume,
+ testSrcVolume)
+ self.assertEqual('Pool_02_SASFLASH', pool)
+ fake_cli.assert_has_calls(
+ [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
+ testSrcVolume['name']), poll=False)])
+
+ def test_manage_existing_get_size(self):
+ get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+ '-state', '-userCap', '-owner',
+ '-attachedSnapshot', '-poolName')
+ test_size = 2
+ commands = [get_lun_cmd]
+ results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
+ fake_cli = self.driverSetup(commands, results)
+ test_volume = self.testData.test_volume2.copy()
+ test_volume['host'] = "host@backendsec#Pool_02_SASFLASH"
+ get_size = self.driver.manage_existing_get_size(
+ test_volume,
+ self.testData.test_existing_ref)
+ expected = [mock.call(*get_lun_cmd, poll=True)]
+ self.assertEqual(test_size, get_size)
+ fake_cli.assert_has_calls(expected)
- def standard_fake_command_execute(self, *args, **kwargv):
- standard_commands = [
- self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
- self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
- self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
- self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
- self.testData.POOL_PROPERTY_CMD]
+ def test_manage_existing_get_size_incorrect_pool(self):
+        """Test manage_existing_get_size of driver with an invalid pool."""
- standard_results = [
- self.testData.LUN_PROPERTY('vol1'),
- self.testData.LUN_PROPERTY('vol2'),
- self.testData.LUN_PROPERTY('vol-vol1'),
- self.testData.LUN_PROPERTY('snapshot1'),
- self.testData.POOL_PROPERTY]
+ get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+ '-state', '-userCap', '-owner',
+ '-attachedSnapshot', '-poolName')
+ commands = [get_lun_cmd]
+ results = [self.testData.LUN_PROPERTY('lun_name')]
+ fake_cli = self.driverSetup(commands, results)
+ test_volume = self.testData.test_volume2.copy()
+ test_volume['host'] = "host@backendsec#fake_pool"
+ ex = self.assertRaises(
+ exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_get_size,
+            test_volume,
+ self.testData.test_existing_ref)
+ self.assertTrue(
+ re.match(r'.*not managed by the host',
+ ex.msg))
+ expected = [mock.call(*get_lun_cmd, poll=True)]
+ fake_cli.assert_has_calls(expected)
- standard_default = SUCCEED
- for i in range(len(standard_commands)):
- if args == standard_commands[i]:
- return standard_results[i]
+ def test_manage_existing(self):
+ lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
+ '-newName', 'vol_with_type', '-o')
+ commands = [lun_rename_cmd]
+ results = [SUCCEED]
+ fake_cli = self.driverSetup(commands, results)
+ self.driver.manage_existing(
+ self.testData.test_volume_with_type,
+ self.testData.test_existing_ref)
+ expected = [mock.call(*lun_rename_cmd, poll=False)]
+ fake_cli.assert_has_calls(expected)
- return standard_default
+ @mock.patch(
+ "eventlet.event.Event.wait",
+ mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
+ 'storagetype:pool': 'unit_test_pool'}))
+ def test_create_compression_volume(self):
+ commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.NDU_LIST_CMD]
+ results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.NDU_LIST_RESULT]
- def fake_setup_command_execute(self, *command, **kwargv):
- return self.testData.ALL_PORTS
+ fake_cli = self.driverSetup(commands, results)
- def fake_get_pool_properties(self, filter_option, properties=None):
- pool_info = {'pool_name': "unit_test_pool0",
- 'total_capacity_gb': 1000.0,
- 'free_capacity_gb': 1000.0
- }
- return pool_info
+ self.driver.cli.stats['compression_support'] = 'True'
+ self.driver.cli.enablers = ['-Compression',
+ '-Deduplication',
+ '-ThinProvisioning',
+ '-FAST']
+ # Case
+ self.driver.create_volume(self.testData.test_volume_with_type)
+ # Verification
+ expect_cmd = [
+ mock.call(*self.testData.LUN_CREATION_CMD(
+ 'vol_with_type', 1,
+ 'unit_test_pool',
+ 'compressed', None, False)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+ 'vol_with_type'), poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+ 'vol_with_type'), poll=True),
+ mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
+ 1))]
+ fake_cli.assert_has_calls(expect_cmd)
- def fake_get_lun_properties(self, filter_option, properties=None):
- lun_info = {'lun_name': "vol1",
- 'lun_id': 1,
- 'pool': "unit_test_pool",
- 'attached_snapshot': "N/A",
- 'owner': "A",
- 'total_capacity_gb': 1.0,
- 'state': "Ready"}
- return lun_info
- def fake_safe_get(self, value):
- if value == "storage_vnx_pool_name":
- return "unit_test_pool"
- elif 'volume_backend_name' == value:
- return "namedbackend"
- else:
- return None
+class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
+ def generateDriver(self, conf):
+ return EMCCLIFCDriver(configuration=conf)
@mock.patch(
"oslo_concurrency.processutils.execute",
self.testData.test_volume,
self.testData.connector)
- self.assertEqual(conn_info['data']['initiator_target_map'],
- EMCVNXCLIDriverTestData.i_t_map)
- self.assertEqual(conn_info['data']['target_wwn'],
- ['1122334455667777'])
+ self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
+ conn_info['data']['initiator_target_map'])
+ self.assertEqual(['1122334455667777'],
+ conn_info['data']['target_wwn'])
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
poll=False),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
self.testData.connector)
self.assertTrue('initiator_target_map' in connection_info['data'],
'initiator_target_map should be populated.')
- self.assertEqual(connection_info['data']['initiator_target_map'],
- EMCVNXCLIDriverTestData.i_t_map)
+ self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
+ connection_info['data']['initiator_target_map'])
def test_get_volume_stats(self):
- self.driverSetup()
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_PROPERTY_W_FASTCACHE_CMD]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_PROPERTY_W_FASTCACHE]
+ self.driverSetup(commands, results)
stats = self.driver.get_volume_stats(True)
- self.assertTrue(stats['driver_version'] is not None,
- "driver_version is not returned")
- self.assertTrue(
- stats['free_capacity_gb'] == 3257.851,
- "free_capacity_gb is not correct")
- self.assertTrue(
- stats['reserved_percentage'] == 3,
- "reserved_percentage is not correct")
+
+ self.assertTrue(stats['driver_version'] == VERSION,
+ "driver_version is incorrect")
self.assertTrue(
stats['storage_protocol'] == 'FC',
- "storage_protocol is not correct")
- self.assertTrue(
- stats['total_capacity_gb'] == 3281.146,
- "total_capacity_gb is not correct")
+ "storage_protocol is incorrect")
self.assertTrue(
stats['vendor_name'] == "EMC",
- "vender name is not correct")
+ "vendor name is incorrect")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
- "volume backend name is not correct")
- self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
- self.assertTrue(
- stats['driver_version'] == "05.01.00",
- "driver version is incorrect.")
+ "volume backend name is incorrect")
+
+ pool_stats = stats['pools'][0]
+
+ expected_pool_stats = {
+ 'free_capacity_gb': 3257.851,
+ 'reserved_percentage': 3,
+ 'location_info': 'unit_test_pool|fakeSerial',
+ 'total_capacity_gb': 3281.146,
+ 'compression_support': 'True',
+ 'deduplication_support': 'True',
+ 'thinprovisioning_support': 'True',
+ 'consistencygroup_support': 'True',
+ 'pool_name': 'unit_test_pool',
+ 'fast_cache_enabled': 'True',
+ 'fast_support': 'True'}
+
+ self.assertEqual(expected_pool_stats, pool_stats)
def test_get_volume_stats_too_many_luns(self):
- commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
- results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
+ self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_PROPERTY_W_FASTCACHE,
+ self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.check_max_pool_luns_threshold = True
stats = self.driver.get_volume_stats(True)
+ pool_stats = stats['pools'][0]
self.assertTrue(
- stats['free_capacity_gb'] == 0,
- "free_capacity_gb is not correct")
- expect_cmd = [
- mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
- poll=False)]
- fake_cli.assert_has_calls(expect_cmd)
+ pool_stats['free_capacity_gb'] == 0,
+ "free_capacity_gb is incorrect")
expect_cmd = [
mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
poll=False)]
self.driver.cli.check_max_pool_luns_threshold = False
stats = self.driver.get_volume_stats(True)
+ pool_stats = stats['pools'][0]
self.assertTrue(stats['driver_version'] is not None,
                        "driver_version is not returned")
self.assertTrue(
- stats['free_capacity_gb'] == 3257.851,
- "free_capacity_gb is not correct")
+ pool_stats['free_capacity_gb'] == 3257.851,
+ "free_capacity_gb is incorrect")
def test_deregister_initiator(self):
fake_cli = self.driverSetup()
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
- self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10")
+ self.assertEqual("10.10.10.10", self.cli_client.active_storage_ip)
expected = [
mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ FAKE_COMMAND), check_exit_code=True)]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
- self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+ self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
- self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+ self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
- self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+ self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
- self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+ self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
from cinder.volume.configuration import Configuration
from cinder.volume.drivers.san import san
from cinder.volume import manager
+from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
CONF = cfg.CONF
'Available Capacity *\(GBs\) *:\s*(.*)\s*',
'free_capacity_gb',
float)
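+    # The CLI prints 'FAST Cache:  Enabled/Disabled'; normalize the value
+    # to the 'True'/'False' strings used in pool stats.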
+ POOL_FAST_CACHE = PropertyDescriptor(
+ '-fastcache',
+ 'FAST Cache:\s*(.*)\s*',
+ 'fast_cache_enabled',
+ lambda value: 'True' if value == 'Enabled' else 'False')
POOL_NAME = PropertyDescriptor(
'-name',
'Pool Name:\s*(.*)\s*',
LOG.info(_LI("iscsi_initiators: %s"), self.iscsi_initiator_map)
# extra spec constants
- self.pool_spec = 'storagetype:pool'
self.tiering_spec = 'storagetype:tiering'
self.provisioning_spec = 'storagetype:provisioning'
self.provisioning_values = {
properties, poll=poll)
return data
- def get_pool(self, name, poll=True):
+ def get_pool(self, name, properties=POOL_ALL, poll=True):
data = self.get_pool_properties(('-name', name),
+ properties=properties,
poll=poll)
return data
else:
return False
- def get_pool_list(self, poll=True):
+ def get_pool_list(self, properties=POOL_ALL, poll=True):
temp_cache = []
- cmd = ('storagepool', '-list', '-availableCap', '-state')
- out, rc = self.command_execute(*cmd, poll=poll)
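+        # Build the listing command from the requested property options,
+        # e.g. ('storagepool', '-list', '-availableCap', '-userCap',
+        # '-fastcache') when FAST Cache stats are requested.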
+ list_cmd = ('storagepool', '-list')
+ for prop in properties:
+ list_cmd += (prop.option,)
+ output_properties = [self.POOL_NAME] + properties
+ out, rc = self.command_execute(*list_cmd, poll=poll)
if rc != 0:
- self._raise_cli_error(cmd, rc, out)
+ self._raise_cli_error(list_cmd, rc, out)
try:
for pool in out.split('\n\n'):
if len(pool.strip()) == 0:
continue
obj = {}
- obj['name'] = self._get_property_value(pool, self.POOL_NAME)
- obj['free_space'] = self._get_property_value(
- pool, self.POOL_FREE_CAPACITY)
+ for prop in output_properties:
+ obj[prop.key] = self._get_property_value(pool, prop)
temp_cache.append(obj)
except Exception as ex:
LOG.error(_LE("Error happened during storage pool querying, %s."),
ex)
# NOTE: Do not want to continue raise the exception
- # as the pools may temporarly unavailable
+ # as the pools may be temporarily unavailable
pass
return temp_cache
class EMCVnxCliBase(object):
"""This class defines the functions to use the native CLI functionality."""
- VERSION = '05.01.00'
+ VERSION = '05.02.00'
stats = {'driver_version': VERSION,
- 'free_capacity_gb': 'unknown',
- 'reserved_percentage': 0,
'storage_protocol': None,
- 'total_capacity_gb': 'unknown',
'vendor_name': 'EMC',
'volume_backend_name': None,
'compression_support': 'False',
if self.force_delete_lun_in_sg:
LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
- def get_target_storagepool(self, volume, source_volume_name=None):
+ def get_target_storagepool(self, volume, source_volume=None):
raise NotImplementedError
- def dumps_provider_location(self, pl_dict):
- return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
-
def get_array_serial(self):
if not self.array_serial:
self.array_serial = self._client.get_array_serial()
volume_size = snapshot['volume_size']
dest_volume_name = volume_name + '_dest'
- pool_name = self.get_target_storagepool(volume, source_volume_name)
+ pool_name = self.get_target_storagepool(volume, snapshot['volume'])
specs = self.get_volumetype_extraspecs(volume)
provisioning, tiering = self._get_extra_spec_value(specs)
store_spec = {
data = self._client.create_lun_with_advance_feature(
pool, volume_name, volume_size,
provisioning, tiering, volume['consistencygroup_id'], False)
- pl_dict = {'system': self.get_array_serial(),
- 'type': 'lun',
- 'id': str(data['lun_id'])}
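+        # The location string embeds the driver version, e.g.
+        # 'system^FNM11111|type^lun|id^1|version^05.02.00'; volumes created
+        # by older driver versions lack the version tag.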
model_update = {'provider_location':
- self.dumps_provider_location(pl_dict)}
- volume['provider_location'] = model_update['provider_location']
+ self._build_provider_location_for_lun(data['lun_id'])}
+
return model_update
def _volume_creation_check(self, volume):
- """This function will perform the check on the
- extra spec before the volume can be created. The
- check is a common check between the array based
- and pool based backend.
- """
-
+        """Checks extra specs before the volume can be created."""
specs = self.get_volumetype_extraspecs(volume)
- provisioning, tiering = self._get_extra_spec_value(specs)
+ self._get_and_validate_extra_specs(specs)
+
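+    # Shared by _volume_creation_check and retype to validate extra specs.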
+ def _get_and_validate_extra_specs(self, specs):
+        """Checks extra spec combinations for validity."""
+ if "storagetype:pool" in specs:
+            LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsolete "
+                            "since driver version 5.1.0. The key will be "
+                            "ignored."))
+ provisioning, tiering = self._get_extra_spec_value(specs)
# step 1: check extra spec value
if provisioning:
self._check_extra_spec_value(
self._client.tiering_values.keys())
# step 2: check extra spec combination
- self._check_extra_spec_combination(specs)
+ self._check_extra_spec_combination(provisioning, tiering)
+ return provisioning, tiering
def _check_extra_spec_value(self, extra_spec, valid_values):
"""Checks whether an extra spec's value is valid."""
return provisioning, tiering
- def _check_extra_spec_combination(self, extra_specs):
+ def _check_extra_spec_combination(self, provisioning, tiering):
"""Checks whether extra spec combination is valid."""
-
- provisioning, tiering = self._get_extra_spec_value(extra_specs)
enablers = self.enablers
-
# check provisioning and tiering
# deduplicated and tiering can not be both enabled
if provisioning == 'deduplicated' and tiering is not None:
"target_array_serial."))
return false_ret
- if len(target_pool_name) == 0:
- # if retype, try to get the pool of the volume
- # when it's array-based
- if new_type:
- if 'storagetype:pool' in new_type['extra_specs']\
- and new_type['extra_specs']['storagetype:pool']\
- is not None:
- target_pool_name = \
- new_type['extra_specs']['storagetype:pool']
- else:
- target_pool_name = self._client.get_pool_name_of_lun(
- volume['name'])
-
- if len(target_pool_name) == 0:
- LOG.debug("Skip storage-assisted migration because "
- "it doesn't support array backend .")
- return false_ret
# source and destination should be on same array
array_serial = self.get_array_serial()
if target_array_serial != array_serial:
'target and source backend are not managing'
'the same array.')
return false_ret
- # same protocol should be used if volume is in-use
+
+ if len(target_pool_name) == 0:
+ # Destination host is using a legacy driver
+            LOG.warning(_LW("Failed to get the pool information of "
+                            "host %s. Storage-assisted migration is not "
+                            "supported; the host may be using a legacy "
+                            "driver."),
+                        host['name'])
+ return false_ret
+
+ # Same protocol should be used if volume is in-use
if host['capabilities']['storage_protocol'] != self.protocol \
and self._get_original_status(volume) == 'in-use':
LOG.debug('Skip storage-assisted migration because '
def retype(self, ctxt, volume, new_type, diff, host):
new_specs = new_type['extra_specs']
- new_provisioning, new_tiering = self._get_extra_spec_value(
- new_specs)
- # validate new_type
- if new_provisioning:
- self._check_extra_spec_value(
- new_provisioning,
- self._client.provisioning_values.keys())
- if new_tiering:
- self._check_extra_spec_value(
- new_tiering,
- self._client.tiering_values.keys())
- self._check_extra_spec_combination(new_specs)
+ new_provisioning, new_tiering = (
+ self._get_and_validate_extra_specs(new_specs))
- # check what changes are needed
+ # Check what changes are needed
migration, tiering_change = self.determine_changes_when_retype(
volume, new_type, host)
- # reject if volume has snapshot when migration is needed
+ # Reject if volume has snapshot when migration is needed
if migration and self._client.check_lun_has_snap(
self.get_lun_id(volume)):
LOG.debug('Driver is not able to do retype because the volume '
return False
if migration:
- # check whether the migration is valid
+ # Check whether the migration is valid
is_valid, target_pool_name = (
self._is_valid_for_storage_assisted_migration(
volume, host, new_type))
'retype.'))
return False
else:
- # migration is invalid
+ # Migration is invalid
LOG.debug('Driver is not able to do retype because '
'storage-assisted migration is not valid '
'in this situation.')
return False
- elif not migration and tiering_change:
- # modify lun to change tiering policy
+ elif tiering_change:
+ # Modify lun to change tiering policy
self._client.modify_lun_tiering(volume['name'], new_tiering)
return True
else:
old_specs = self.get_volumetype_extraspecs(volume)
old_provisioning, old_tiering = self._get_extra_spec_value(
old_specs)
- old_pool = self.get_specific_extra_spec(
- old_specs,
- self._client.pool_spec)
new_specs = new_type['extra_specs']
new_provisioning, new_tiering = self._get_extra_spec_value(
new_specs)
- new_pool = self.get_specific_extra_spec(
- new_specs,
- self._client.pool_spec)
if volume['host'] != host['host'] or \
old_provisioning != new_provisioning:
migration = True
- elif new_pool and new_pool != old_pool:
- migration = True
if new_tiering != old_tiering:
tiering_change = True
return False
return True
+ def _build_pool_stats(self, pool):
+ pool_stats = {}
+ pool_stats['pool_name'] = pool['pool_name']
+ pool_stats['total_capacity_gb'] = pool['total_capacity_gb']
+ pool_stats['free_capacity_gb'] = pool['free_capacity_gb']
+ # Some extra capacity will be used by the metadata of pool LUNs.
+ # The overhead is about LUN_Capacity * 0.02 + 3 GB
+ # reserved_percentage will be used to make sure the scheduler
+ # takes the overhead into consideration.
+ # Assume that all the remaining capacity is to be used to create
+ # a thick LUN, reserved_percentage is estimated as follows:
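+ # Derivation (a sketch): if a thick LUN of x GB consumed all of
+ # the free capacity F, then x + (0.02 * x + 3) = F, so the
+ # overhead F - x equals (0.02 * F + 3) / 1.02; divided by the
+ # total capacity and multiplied by 100, it gives the percentage: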
+ reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
+ (1.02 * pool['total_capacity_gb'])) * 100)
+ pool_stats['reserved_percentage'] = int(math.ceil(min(reserved, 100)))
+ if self.check_max_pool_luns_threshold:
+ pool_feature = self._client.get_pool_feature_properties(poll=False)
+ if (pool_feature['max_pool_luns']
+ <= pool_feature['total_pool_luns']):
+ LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
+ "have been created. "
+ "No more LUN creation can be done."),
+ pool_feature['max_pool_luns'])
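+ # Report zero free capacity so that the scheduler will not
+ # place new LUNs in this pool.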
+ pool_stats['free_capacity_gb'] = 0
+
+ array_serial = self.get_array_serial()
+ pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
+ {'pool_name': pool['pool_name'],
+ 'array_serial': array_serial})
+ # Check if this pool's fast_cache is enabled
+ if 'fast_cache_enabled' not in pool:
+ pool_stats['fast_cache_enabled'] = 'False'
+ else:
+ pool_stats['fast_cache_enabled'] = pool['fast_cache_enabled']
+
+ # Copy advanced feature stats from backend stats
+ pool_stats['compression_support'] = self.stats['compression_support']
+ pool_stats['fast_support'] = self.stats['fast_support']
+ pool_stats['deduplication_support'] = (
+ self.stats['deduplication_support'])
+ pool_stats['thinprovisioning_support'] = (
+ self.stats['thinprovisioning_support'])
+ pool_stats['consistencygroup_support'] = (
+ self.stats['consistencygroup_support'])
+
+ return pool_stats
+
+ @log_enter_exit
def update_volume_stats(self):
- """Update the common status share with pool and
- array backend.
- """
+ """Gets the common stats shared by pool and array backend."""
if not self.determine_all_enablers_exist(self.enablers):
self.enablers = self._client.get_enablers_on_array()
- if '-Compression' in self.enablers:
- self.stats['compression_support'] = 'True'
- else:
- self.stats['compression_support'] = 'False'
- if '-FAST' in self.enablers:
- self.stats['fast_support'] = 'True'
- else:
- self.stats['fast_support'] = 'False'
- if '-Deduplication' in self.enablers:
- self.stats['deduplication_support'] = 'True'
- else:
- self.stats['deduplication_support'] = 'False'
- if '-ThinProvisioning' in self.enablers:
- self.stats['thinprovisioning_support'] = 'True'
- else:
- self.stats['thinprovisioning_support'] = 'False'
- if '-FASTCache' in self.enablers:
- self.stats['fast_cache_enabled'] = 'True'
- else:
- self.stats['fast_cache_enabled'] = 'False'
- if '-VNXSnapshots' in self.enablers:
- self.stats['consistencygroup_support'] = 'True'
- else:
- self.stats['consistencygroup_support'] = 'False'
+
+ self.stats['compression_support'] = (
+ 'True' if '-Compression' in self.enablers else 'False')
+
+ self.stats['fast_support'] = (
+ 'True' if '-FAST' in self.enablers else 'False')
+
+ self.stats['deduplication_support'] = (
+ 'True' if '-Deduplication' in self.enablers else 'False')
+
+ self.stats['thinprovisioning_support'] = (
+ 'True' if '-ThinProvisioning' in self.enablers else 'False')
+
+ self.stats['consistencygroup_support'] = (
+ 'True' if '-VNXSnapshots' in self.enablers else 'False')
if self.protocol == 'iSCSI':
self.iscsi_targets = self._client.get_iscsi_targets(poll=False)
4. Start a migration between the SMP and the temp lun.
"""
self._volume_creation_check(volume)
- array_serial = self.get_array_serial()
flow_name = 'create_volume_from_snapshot'
work_flow = linear_flow.Flow(flow_name)
store_spec = self._construct_store_spec(volume, snapshot)
store=store_spec)
flow_engine.run()
new_lun_id = flow_engine.storage.fetch('new_lun_id')
- pl_dict = {'system': array_serial,
- 'type': 'lun',
- 'id': str(new_lun_id)}
model_update = {'provider_location':
- self.dumps_provider_location(pl_dict)}
- volume['provider_location'] = model_update['provider_location']
+ self._build_provider_location_for_lun(new_lun_id)}
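+ # vol_utils.extract_host/append_host work on host strings of
+ # the form 'host@backend#pool'; e.g. appending pool 'pool_a' to
+ # 'host@backend' yields 'host@backend#pool_a' (illustrative
+ # values). Update the model if the volume's host lacks the pool.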
+ volume_host = volume['host']
+ host = vol_utils.extract_host(volume_host, 'backend')
+ host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
+ if volume_host != host_and_pool:
+ model_update['host'] = host_and_pool
+
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
self._volume_creation_check(volume)
- array_serial = self.get_array_serial()
source_volume_name = src_vref['name']
source_lun_id = self.get_lun_id(src_vref)
volume_size = src_vref['size']
else:
self.delete_snapshot(snapshot)
- pl_dict = {'system': array_serial,
- 'type': 'lun',
- 'id': str(new_lun_id)}
model_update = {'provider_location':
- self.dumps_provider_location(pl_dict)}
+ self._build_provider_location_for_lun(new_lun_id)}
+ volume_host = volume['host']
+ host = vol_utils.extract_host(volume_host, 'backend')
+ host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
+ if volume_host != host_and_pool:
+ model_update['host'] = host_and_pool
+
return model_update
+ def dumps_provider_location(self, pl_dict):
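+ # Serializes a dict into the provider_location format, e.g. an
+ # illustrative {'system': 'ARRAY_SN', 'type': 'lun', 'id': '1'}
+ # becomes 'system^ARRAY_SN|type^lun|id^1' (key order follows
+ # dict iteration order).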
+ return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
+
+ def _build_provider_location_for_lun(self, lun_id):
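+ # Builds a provider_location string of the form
+ # 'system^<serial>|type^lun|id^<lun_id>|version^<driver_version>'.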
+ pl_dict = {'system': self.get_array_serial(),
+ 'type': 'lun',
+ 'id': six.text_type(lun_id),
+ 'version': self.VERSION}
+ return self.dumps_provider_location(pl_dict)
+
+ def _extract_provider_location_for_lun(self, provider_location, key='id'):
+ """Extacts value of the specified field from provider_location string.
+
+ :param provider_location: provider_location string
+ :param key: field name of the value to be extracted
+ :return: value of the specified field if it exists, otherwise
+ None is returned
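+
+ For example (illustrative values), given the provider_location
+ 'system^ARRAY_SN|type^lun|id^7' and key 'id', '7' is returned.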
+ """
+
+ kvps = provider_location.split('|')
+ for kvp in kvps:
+ fields = kvp.split('^')
+ if len(fields) == 2 and fields[0] == key:
+ return fields[1]
+
def create_consistencygroup(self, context, group):
"""Creates a consistency group."""
LOG.info(_LI('Start to create consistency group: %(group_name)s '
def get_lun_id(self, volume):
lun_id = None
try:
- if volume.get('provider_location') is not None:
- lun_id = int(
- volume['provider_location'].split('|')[2].split('^')[1])
- if not lun_id:
+ provider_location = volume.get('provider_location')
+ if provider_location:
+ lun_id = self._extract_provider_location_for_lun(
+ provider_location,
+ 'id')
+ if lun_id:
+ lun_id = int(lun_id)
+ else:
LOG.debug('Lun id is not stored in provider location, '
'query it.')
lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
return do_terminate_connection()
def manage_existing_get_size(self, volume, ref):
- """Return size of volume to be managed by manage_existing."""
+ """Returns size of volume to be managed by manage_existing."""
# Check that the reference is valid
if 'id' not in ref:
reason=reason)
# Check for existence of the lun
- data = self._client.get_lun_by_id(ref['id'])
+ data = self._client.get_lun_by_id(
+ ref['id'],
+ properties=self._client.LUN_WITH_POOL)
if data is None:
- reason = _('Find no lun with the specified lun_id.')
+ reason = _('Cannot find the lun with the specified id %s.') % ref['id']
+ raise exception.ManageExistingInvalidReference(existing_ref=ref,
+ reason=reason)
+
+ pool = self.get_target_storagepool(volume, None)
+ if pool and data['pool'] != pool:
+ reason = (_('The input lun %(lun_id)s is in pool %(poolname)s '
+ 'which is not managed by the host %(host)s.')
+ % {'lun_id': ref['id'],
+ 'poolname': data['pool'],
+ 'host': volume['host']})
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return data['total_capacity_gb']
"""
self._client.lun_rename(ref['id'], volume['name'])
+ model_update = {'provider_location':
+ self._build_provider_location_for_lun(ref['id'])}
+
+ return model_update
def find_iscsi_protocol_endpoints(self, device_sp):
"""Returns the iSCSI initiators for a SP."""
return specs
+ def get_pool(self, volume):
+ """Returns the pool name of a volume."""
+
+ data = self._client.get_lun_by_name(volume['name'],
+ [self._client.LUN_POOL],
+ poll=False)
+ return data.get(self._client.LUN_POOL.key)
+
@decorate_all_methods(log_enter_exit)
class EMCVnxCliPool(EMCVnxCliBase):
self._client.get_pool(self.storage_pool)
def get_target_storagepool(self,
- volume=None,
- source_volume_name=None):
- pool_spec_id = "storagetype:pool"
- if volume is not None:
- specs = self.get_volumetype_extraspecs(volume)
- if specs and pool_spec_id in specs:
- expect_pool = specs[pool_spec_id].strip()
- if expect_pool != self.storage_pool:
- msg = _("Storage pool %s is not supported"
- " by this Cinder Volume") % expect_pool
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
+ volume,
+ source_volume=None):
return self.storage_pool
def update_volume_stats(self):
"""Retrieves stats info."""
- self.stats = super(EMCVnxCliPool, self).update_volume_stats()
- pool = self._client.get_pool(self.get_target_storagepool(),
+ super(EMCVnxCliPool, self).update_volume_stats()
+ if '-FASTCache' in self.enablers:
+ properties = [self._client.POOL_FREE_CAPACITY,
+ self._client.POOL_TOTAL_CAPACITY,
+ self._client.POOL_FAST_CACHE]
+ else:
+ properties = [self._client.POOL_FREE_CAPACITY,
+ self._client.POOL_TOTAL_CAPACITY]
+
+ pool = self._client.get_pool(self.storage_pool,
+ properties=properties,
poll=False)
- self.stats['total_capacity_gb'] = pool['total_capacity_gb']
- self.stats['free_capacity_gb'] = pool['free_capacity_gb']
- # Some extra capacity will be used by meta data of pool LUNs.
- # The overhead is about LUN_Capacity * 0.02 + 3 GB
- # reserved_percentage will be used to make sure the scheduler
- # takes the overhead into consideration
- # Assume that all the remaining capacity is to be used to create
- # a thick LUN, reserved_percentage is estimated as follows:
- reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
- (1.02 * pool['total_capacity_gb'])) * 100)
- self.stats['reserved_percentage'] = int(math.ceil(min(reserved, 100)))
- if self.check_max_pool_luns_threshold:
- pool_feature = self._client.get_pool_feature_properties(poll=False)
- if (pool_feature['max_pool_luns']
- <= pool_feature['total_pool_luns']):
- LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
- "have been created. "
- "No more LUN creation can be done."),
- pool_feature['max_pool_luns'])
- self.stats['free_capacity_gb'] = 0
- array_serial = self._client.get_array_serial()
- self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
- {'pool_name': self.storage_pool,
- 'array_serial':
- array_serial['array_serial']})
- # check if this pool's fast_cache is really enabled
- if self.stats['fast_cache_enabled'] == 'True' and \
- not self._client.is_pool_fastcache_enabled(self.storage_pool):
- self.stats['fast_cache_enabled'] = 'False'
+ self.stats['pools'] = [self._build_pool_stats(pool)]
return self.stats
- def manage_existing_get_size(self, volume, ref):
- """Returns size of volume to be managed by manage_existing."""
-
- # Check that the reference is valid
- if 'id' not in ref:
- reason = _('Reference must contain lun_id element.')
- raise exception.ManageExistingInvalidReference(
- existing_ref=ref,
- reason=reason)
- # Check for existence of the lun
- data = self._client.get_lun_by_id(
- ref['id'],
- properties=self._client.LUN_WITH_POOL)
- if data is None:
- reason = _('Cannot find the lun with LUN id %s.') % ref['id']
- raise exception.ManageExistingInvalidReference(existing_ref=ref,
- reason=reason)
- if data['pool'] != self.storage_pool:
- reason = _('The input lun is not in a manageable pool backend '
- 'by cinder')
- raise exception.ManageExistingInvalidReference(existing_ref=ref,
- reason=reason)
- return data['total_capacity_gb']
-
@decorate_all_methods(log_enter_exit)
class EMCVnxCliArray(EMCVnxCliBase):
def __init__(self, prtcl, configuration):
super(EMCVnxCliArray, self).__init__(prtcl,
configuration=configuration)
- self._update_pool_cache()
-
- def _update_pool_cache(self):
- LOG.debug("Updating Pool Cache")
- self.pool_cache = self._client.get_pool_list(poll=False)
- def get_target_storagepool(self, volume, source_volume_name=None):
- """Find the storage pool for given volume."""
- pool_spec_id = "storagetype:pool"
- specs = self.get_volumetype_extraspecs(volume)
- if specs and pool_spec_id in specs:
- return specs[pool_spec_id]
- elif source_volume_name:
- data = self._client.get_lun_by_name(source_volume_name,
- [self._client.LUN_POOL])
+ def get_target_storagepool(self, volume, source_volume=None):
+ pool = vol_utils.extract_host(volume['host'], 'pool')
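+ # vol_utils.extract_host with level 'pool' returns the pool part
+ # of a 'host@backend#pool' string, e.g. 'h1@be1#pool_a' gives
+ # 'pool_a' (illustrative values).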
+
+ # For a newly created volume that is not from a snapshot or a
+ # clone, just use the pool selected by the scheduler.
+ if not source_volume:
+ return pool
+
+ # For a volume created from a snapshot or cloned from another
+ # volume, the pool to use depends on the source volume's version.
+ # If the source volume was created by an older driver version
+ # that doesn't support pool-aware scheduling, use the pool where
+ # the source volume resides. Otherwise, use the pool selected by
+ # the scheduler.
+ provider_location = source_volume.get('provider_location')
+
+ if (provider_location and
+ self._extract_provider_location_for_lun(provider_location,
+ 'version')):
+ return pool
+ else:
+ LOG.warning(_LW("The source volume is a legacy volume. "
+ "Create volume in the pool where the source "
+ "volume %s is created."),
+ source_volume['name'])
+ data = self._client.get_lun_by_name(source_volume['name'],
+ [self._client.LUN_POOL],
+ poll=False)
if data is None:
- msg = _("Failed to find storage pool for source volume %s") \
- % source_volume_name
+ msg = (_("Failed to find storage pool for source volume %s.")
+ % source_volume['name'])
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return data[self._client.LUN_POOL.key]
- else:
- if len(self.pool_cache) > 0:
- pools = sorted(self.pool_cache,
- key=lambda po: po['free_space'],
- reverse=True)
- return pools[0]['name']
-
- msg = (_("Failed to find storage pool to create volume %s.")
- % volume['name'])
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
def update_volume_stats(self):
- """Retrieve stats info."""
- self.stats = super(EMCVnxCliArray, self).update_volume_stats()
- self._update_pool_cache()
- self.stats['total_capacity_gb'] = 'unknown'
- self.stats['free_capacity_gb'] = 'unknown'
- array_serial = self._client.get_array_serial()
- self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
- {'pool_name': '',
- 'array_serial':
- array_serial['array_serial']})
- self.stats['fast_cache_enabled'] = 'unknown'
+ """Retrieves stats info."""
+ super(EMCVnxCliArray, self).update_volume_stats()
+ if '-FASTCache' in self.enablers:
+ properties = [self._client.POOL_FREE_CAPACITY,
+ self._client.POOL_TOTAL_CAPACITY,
+ self._client.POOL_FAST_CACHE]
+ else:
+ properties = [self._client.POOL_FREE_CAPACITY,
+ self._client.POOL_TOTAL_CAPACITY]
+ pool_list = self._client.get_pool_list(properties, False)
+
+ self.stats['pools'] = [self._build_pool_stats(pool)
+ for pool in pool_list]
return self.stats
class CreateDestLunTask(task.Task):
"""Creates a destination lun for migration.
- Reversion strategy: Detach the temp lun.
+ Reversion strategy: Delete the temp destination lun.
"""
def __init__(self):
super(CreateDestLunTask, self).__init__(provides='lun_data')