def test_create_destroy(self):
FAKE_POOLS = [{'label': 'DDP', 'volumeGroupRef': 'test'}]
self.library._get_storage_pools = mock.Mock(return_value=FAKE_POOLS)
+ self.library._client.features = mock.Mock()
self.mock_object(self.library._client, '_get_resource_url', mock.Mock(
return_value=fakes.FAKE_ENDPOINT_HTTP))
self.mock_object(self.library._client, '_eval_response')
self.driver.create_volume(self.volume)
self.library._create_volume.assert_called_with(
- 'DDP', self.fake_eseries_volume_label, self.volume['size'])
+ 'DDP', self.fake_eseries_volume_label, self.volume['size'], {})
def test_create_volume_no_pool_provided_by_scheduler(self):
volume = copy.deepcopy(self.volume)
'label': 'FOREIGN HOST GROUP',
}
-STORAGE_POOLS = [
+SSC_POOLS = [
{
- "sequenceNum": 1,
- "offline": False,
- "raidLevel": "raidDiskPool",
- "worldWideName": "60080E50002998A00000A387558A7514",
- "volumeGroupRef": "0400000060080E50002998A00000A387558A7514",
- "reserved1": "000000000000000000000000",
- "reserved2": "",
- "trayLossProtection": False,
- "label": "DDP",
- "state": "complete",
- "spindleSpeedMatch": True,
- "spindleSpeed": 10033,
- "isInaccessible": False,
- "securityType": "none",
- "drawerLossProtection": False,
- "protectionInformationCapable": False,
- "protectionInformationCapabilities": {
+ "poolId": "0400000060080E5000290D8000009C9955828DD2",
+ "name": "DDP",
+ "pool": {
+ "sequenceNum": 2,
+ "offline": False,
+ "raidLevel": "raidDiskPool",
+ "worldWideName": "60080E5000290D8000009C9955828DD2",
+ "volumeGroupRef": "0400000060080E5000290D8000009C9955828DD2",
+ "reserved1": "000000000000000000000000",
+ "reserved2": "",
+ "trayLossProtection": False,
+ "label": "DDP",
+ "state": "complete",
+ "spindleSpeedMatch": True,
+ "spindleSpeed": 7200,
+ "isInaccessible": False,
+ "securityType": "none",
+ "drawerLossProtection": True,
"protectionInformationCapable": False,
- "protectionType": "type0Protection"
- },
- "volumeGroupData": {
- "type": "diskPool",
- "diskPoolData": {
- "reconstructionReservedDriveCount": 1,
- "reconstructionReservedAmt": "138512695296",
- "reconstructionReservedDriveCountCurrent": 1,
- "poolUtilizationWarningThreshold": 50,
- "poolUtilizationCriticalThreshold": 85,
- "poolUtilizationState": "utilizationOptimal",
- "unusableCapacity": "0",
- "degradedReconstructPriority": "high",
- "criticalReconstructPriority": "highest",
- "backgroundOperationPriority": "low",
- "allocGranularity": "4294967296"
- }
+ "protectionInformationCapabilities": {
+ "protectionInformationCapable": True,
+ "protectionType": "type2Protection"
+ },
+ "volumeGroupData": {
+ "type": "diskPool",
+ "diskPoolData": {
+ "reconstructionReservedDriveCount": 1,
+ "reconstructionReservedAmt": "2992518463488",
+ "reconstructionReservedDriveCountCurrent": 1,
+ "poolUtilizationWarningThreshold": 100,
+ "poolUtilizationCriticalThreshold": 100,
+ "poolUtilizationState": "utilizationOptimal",
+ "unusableCapacity": "0",
+ "degradedReconstructPriority": "high",
+ "criticalReconstructPriority": "highest",
+ "backgroundOperationPriority": "low",
+ "allocGranularity": "4294967296"
+ }
+ },
+ "usage": "standard",
+ "driveBlockFormat": "allNative",
+ "reservedSpaceAllocated": True,
+ "usedSpace": "13653701033984",
+ "totalRaidedSpace": "23459111370752",
+ "extents": [
+ {
+ "sectorOffset": "0",
+ "rawCapacity": "9805410336768",
+ "raidLevel": "raidDiskPool",
+ "volumeGroupRef":
+ "0400000060080E5000290D8000009C9955828DD2",
+ "freeExtentRef":
+ "0301000060080E5000290D8000009C9955828DD2",
+ "reserved1": "000000000000000000000000",
+ "reserved2": ""
+ }
+ ],
+ "largestFreeExtentSize": "9805410336768",
+ "raidStatus": "optimal",
+ "freeSpace": "9805410336768",
+ "drivePhysicalType": "sas",
+ "driveMediaType": "hdd",
+ "normalizedSpindleSpeed": "spindleSpeed7200",
+ "id": "0400000060080E5000290D8000009C9955828DD2",
+ "diskPool": True,
+ "name": "DDP"
},
- "usage": "standard",
- "driveBlockFormat": "allNative",
- "reservedSpaceAllocated": True,
- "usedSpace": "55834574848",
- "totalRaidedSpace": "1060856922112",
- "extents": [
- {
- "sectorOffset": "0",
- "rawCapacity": "1005022347264",
- "raidLevel": "raidDiskPool",
- "volumeGroupRef": "0400000060080E50002998A00000A387558A7514",
- "freeExtentRef": "0301000060080E50002998A00000A387558A7514",
- "reserved1": "000000000000000000000000",
- "reserved2": ""
- }
- ],
- "largestFreeExtentSize": "1005022347264",
- "raidStatus": "optimal",
- "freeSpace": "1005022347264",
- "drivePhysicalType": "sas",
- "driveMediaType": "hdd",
- "normalizedSpindleSpeed": "spindleSpeed10k",
- "id": "0400000060080E50002998A00000A387558A7514",
- "diskPool": True
+ "flashCacheCapable": True,
+ "dataAssuranceCapable": True,
+ "encrypted": False,
+ "thinProvisioningCapable": True,
+ "spindleSpeed": "spindleSpeed7200",
+ "raidLevel": "raidDiskPool",
+ "availableFreeExtentCapacities": [
+ "9805410336768"
+ ]
},
{
- "sequenceNum": 1,
- "offline": False,
- "raidLevel": "raid5",
- "worldWideName": "60080E500029E0B4000059A0553E1725",
- "volumeGroupRef": "0400000060080E500029E0B4000059A0553E1725",
- "reserved1": "000000000000000000000000",
- "reserved2": "",
- "trayLossProtection": False,
- "label": "test_vg_1",
- "state": "complete",
- "spindleSpeedMatch": False,
- "spindleSpeed": 10000,
- "isInaccessible": False,
- "securityType": "enabled",
- "drawerLossProtection": False,
- "protectionInformationCapable": False,
- "protectionInformationCapabilities": {
+ "poolId": "0400000060080E5000290D8000009CBA55828E96",
+ "name": "pool_raid1",
+ "pool": {
+ "sequenceNum": 6,
+ "offline": False,
+ "raidLevel": "raid1",
+ "worldWideName": "60080E5000290D8000009CBA55828E96",
+ "volumeGroupRef": "0400000060080E5000290D8000009CBA55828E96",
+ "reserved1": "000000000000000000000000",
+ "reserved2": "",
+ "trayLossProtection": False,
+ "label": "pool_raid1",
+ "state": "complete",
+ "spindleSpeedMatch": True,
+ "spindleSpeed": 10000,
+ "isInaccessible": False,
+ "securityType": "none",
+ "drawerLossProtection": True,
"protectionInformationCapable": False,
- "protectionType": "type0Protection"
- },
- "volumeGroupData": {
- "type": "unknown",
- "diskPoolData": None
+ "protectionInformationCapabilities": {
+ "protectionInformationCapable": True,
+ "protectionType": "type2Protection"
+ },
+ "volumeGroupData": {
+ "type": "unknown",
+ "diskPoolData": None
+ },
+ "usage": "standard",
+ "driveBlockFormat": "allNative",
+ "reservedSpaceAllocated": True,
+ "usedSpace": "2978559819776",
+ "totalRaidedSpace": "6662444097536",
+ "extents": [
+ {
+ "sectorOffset": "387891200",
+ "rawCapacity": "3683884277760",
+ "raidLevel": "raid1",
+ "volumeGroupRef":
+ "0400000060080E5000290D8000009CBA55828E96",
+ "freeExtentRef":
+ "030000B360080E5000290D8000009CBA55828E96",
+ "reserved1": "000000000000000000000000",
+ "reserved2": ""
+ }
+ ],
+ "largestFreeExtentSize": "3683884277760",
+ "raidStatus": "optimal",
+ "freeSpace": "3683884277760",
+ "drivePhysicalType": "sas",
+ "driveMediaType": "hdd",
+ "normalizedSpindleSpeed": "spindleSpeed10k",
+ "id": "0400000060080E5000290D8000009CBA55828E96",
+ "diskPool": False,
+ "name": "pool_raid1"
},
- "usage": "standard",
- "driveBlockFormat": "allNative",
- "reservedSpaceAllocated": False,
- "usedSpace": "28366077952",
- "totalRaidedSpace": "292557733888",
- "extents": [
- {
- "sectorOffset": "27701248",
- "rawCapacity": "264191655936",
- "raidLevel": "raid5",
- "volumeGroupRef": "0400000060080E500029E0B4000059A0553E1725",
- "freeExtentRef": "0300001B60080E500029E0B4000059A0553E1725",
- "reserved1": "000000000000000000000000",
- "reserved2": ""
- }
- ],
- "largestFreeExtentSize": "264191655936",
- "raidStatus": "optimal",
- "freeSpace": "264191655936",
- "drivePhysicalType": "sas",
- "driveMediaType": "ssd",
- "normalizedSpindleSpeed": "spindleSpeed10k",
- "id": "0400000060080E500029E0B4000059A0553E1725",
- "diskPool": False
+ "flashCacheCapable": False,
+ "dataAssuranceCapable": True,
+ "encrypted": False,
+ "thinProvisioningCapable": False,
+ "spindleSpeed": "spindleSpeed10k",
+ "raidLevel": "raid1",
+ "availableFreeExtentCapacities": [
+ "3683884277760"
+ ]
},
{
- "sequenceNum": 3,
- "offline": False,
- "raidLevel": "raid6",
- "worldWideName": "60080E500029E0B4000059A2553E1739",
- "volumeGroupRef": "0400000060080E500029E0B4000059A2553E1739",
- "reserved1": "000000000000000000000000",
- "reserved2": "",
- "trayLossProtection": False,
- "label": "test_vg_2",
- "state": "complete",
- "spindleSpeedMatch": True,
- "spindleSpeed": 10020,
- "isInaccessible": False,
- "securityType": "enabled",
- "drawerLossProtection": False,
- "protectionInformationCapable": False,
- "protectionInformationCapabilities": {
+ "poolId": "0400000060080E5000290D8000009CAB55828E51",
+ "name": "pool_raid6",
+ "pool": {
+ "sequenceNum": 3,
+ "offline": False,
+ "raidLevel": "raid6",
+ "worldWideName": "60080E5000290D8000009CAB55828E51",
+ "volumeGroupRef": "0400000060080E5000290D8000009CAB55828E51",
+ "reserved1": "000000000000000000000000",
+ "reserved2": "",
+ "trayLossProtection": False,
+ "label": "pool_raid6",
+ "state": "complete",
+ "spindleSpeedMatch": True,
+ "spindleSpeed": 15000,
+ "isInaccessible": False,
+ "securityType": "enabled",
+ "drawerLossProtection": False,
"protectionInformationCapable": False,
- "protectionType": "type2Protection"
- },
- "volumeGroupData": {
- "type": "unknown",
- "diskPoolData": None
+ "protectionInformationCapabilities": {
+ "protectionInformationCapable": True,
+ "protectionType": "type2Protection"
+ },
+ "volumeGroupData": {
+ "type": "unknown",
+ "diskPoolData": None
+ },
+ "usage": "standard",
+ "driveBlockFormat": "allNative",
+ "reservedSpaceAllocated": True,
+ "usedSpace": "16413217521664",
+ "totalRaidedSpace": "16637410312192",
+ "extents": [
+ {
+ "sectorOffset": "1144950784",
+ "rawCapacity": "224192790528",
+ "raidLevel": "raid6",
+ "volumeGroupRef":
+ "0400000060080E5000290D8000009CAB55828E51",
+ "freeExtentRef":
+ "0300005960080E5000290D8000009CAB55828E51",
+ "reserved1": "000000000000000000000000",
+ "reserved2": ""
+ }
+ ],
+ "largestFreeExtentSize": "224192790528",
+ "raidStatus": "optimal",
+ "freeSpace": "224192790528",
+ "drivePhysicalType": "sas",
+ "driveMediaType": "hdd",
+ "normalizedSpindleSpeed": "spindleSpeed15k",
+ "id": "0400000060080E5000290D8000009CAB55828E51",
+ "diskPool": False,
+ "name": "pool_raid6"
},
- "usage": "standard",
- "driveBlockFormat": "allNative",
- "reservedSpaceAllocated": False,
- "usedSpace": "27399710720",
- "totalRaidedSpace": "1798769641472",
- "extents": [
- {
- "sectorOffset": "17839360",
- "rawCapacity": "1771369930752",
- "raidLevel": "raid6",
- "volumeGroupRef": "0400000060080E500029E0B4000059A2553E1739",
- "freeExtentRef": "0300001360080E500029E0B4000059A2553E1739",
- "reserved1": "000000000000000000000000",
- "reserved2": ""
- }
- ],
- "largestFreeExtentSize": "1771369930752",
- "raidStatus": "optimal",
- "freeSpace": "1771369930752",
- "drivePhysicalType": "sas",
- "driveMediaType": "hdd",
- "normalizedSpindleSpeed": "spindleSpeed10k",
- "id": "0400000060080E500029E0B4000059A2553E1739",
- "diskPool": False
+ "flashCacheCapable": False,
+ "dataAssuranceCapable": True,
+ "encrypted": True,
+ "thinProvisioningCapable": False,
+ "spindleSpeed": "spindleSpeed15k",
+ "raidLevel": "raid6",
+ "availableFreeExtentCapacities": [
+ "224192790528"
+ ]
}
]
+STORAGE_POOLS = [ssc_pool['pool'] for ssc_pool in SSC_POOLS]
+
VOLUME = {
'extremeProtection': False,
'pitBaseVolume': True,
import ddt
import mock
+from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
eseries_fake
+
from cinder.volume.drivers.netapp.eseries import client
+from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
eseries_fake.FAKE_ABOUT_RESPONSE['version']),
eseries_info)
+ def test_list_ssc_storage_pools(self):
+ self.my_client.features = mock.Mock()
+ self.my_client._invoke = mock.Mock(
+ return_value=eseries_fake.SSC_POOLS)
+
+ pools = client.RestClient.list_ssc_storage_pools(self.my_client)
+
+ self.assertEqual(eseries_fake.SSC_POOLS, pools)
+
+ def test_get_ssc_storage_pool(self):
+ fake_pool = eseries_fake.SSC_POOLS[0]
+ self.my_client.features = mock.Mock()
+ self.my_client._invoke = mock.Mock(
+ return_value=fake_pool)
+
+ pool = client.RestClient.get_ssc_storage_pool(self.my_client,
+ fake_pool['poolId'])
+
+ self.assertEqual(fake_pool, pool)
+
+ def test_create_volume_V1(self):
+ self.my_client.features = mock.Mock()
+ self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
+ supported=False)
+ create_volume = self.my_client._invoke = mock.Mock(
+ return_value=eseries_fake.VOLUME)
+
+ volume = client.RestClient.create_volume(self.my_client,
+ 'fakePool', '1', 1)
+
+ args, kwargs = create_volume.call_args
+ verb, url, body = args
+ # Ensure the correct API was used
+ self.assertEqual('/storage-systems/{system-id}/volumes', url)
+ self.assertEqual(eseries_fake.VOLUME, volume)
+
+ def test_create_volume_V2(self):
+ self.my_client.features = mock.Mock()
+ self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
+ supported=True)
+ create_volume = self.my_client._invoke = mock.Mock(
+ return_value=eseries_fake.VOLUME)
+
+ volume = client.RestClient.create_volume(self.my_client,
+ 'fakePool', '1', 1)
+
+ args, kwargs = create_volume.call_args
+ verb, url, body = args
+ # Ensure the correct API was used
+ self.assertIn('/storage-systems/{system-id}/ssc/volumes', url,
+ 'The legacy API was used!')
+ self.assertEqual(eseries_fake.VOLUME, volume)
+
+ def test_create_volume_unsupported_specs(self):
+ self.my_client.features = mock.Mock()
+ self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
+ supported=False)
+ self.my_client.api_version = '01.52.9000.1'
+
+ self.assertRaises(exception.NetAppDriverException,
+ client.RestClient.create_volume, self.my_client,
+ '1', 'label', 1, read_cache=True)
+
@ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3',
'01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4')
def test_api_version_not_support_asup(self, api_version):
client.RestClient._init_features(self.my_client)
- self.assertFalse(self.my_client.features.AUTOSUPPORT)
+ self.assertFalse(self.my_client.features.AUTOSUPPORT.supported)
@ddt.data('01.52.9000.3', '01.52.9000.4', '01.52.8999.2',
'01.52.8999.3', '01.53.8999.3', '01.53.9000.2',
client.RestClient._init_features(self.my_client)
- self.assertTrue(self.my_client.features.AUTOSUPPORT)
+ self.assertTrue(self.my_client.features.AUTOSUPPORT.supported)
+
+ @ddt.data('00.00.00.00', '01.52.9000.1', '01.52.9001.2', '00.53.9001.3',
+ '01.53.9090.1', '1.53.9010.14', '0.53.9011.15')
+ def test_api_version_not_support_ssc_api(self, api_version):
+
+ self.mock_object(client.RestClient,
+ 'get_eseries_api_info',
+ mock.Mock(return_value=('proxy', api_version)))
+
+ client.RestClient._init_features(self.my_client)
+
+ self.assertFalse(self.my_client.features.SSC_API_V2.supported)
+
+ @ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1',
+ '01.53.9010.20', '01.53.9010.16', '01.54.9000.1',
+ '02.51.9000.3', '02.52.8999.3', '02.51.8999.2')
+ def test_api_version_supports_ssc_api(self, api_version):
+
+ self.mock_object(client.RestClient,
+ 'get_eseries_api_info',
+ mock.Mock(return_value=('proxy', api_version)))
+
+ client.RestClient._init_features(self.my_client)
+
+ self.assertTrue(self.my_client.features.SSC_API_V2.supported)
import ddt
import mock
+import six
from cinder import exception
from cinder import test
filtered_pool_labels = [pool['label'] for pool in filtered_pools]
self.assertListEqual(pool_labels, filtered_pool_labels)
- def test_update_ssc_info(self):
+ def test_update_ssc_info_no_ssc(self):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'driveMediaType': 'ssd'}]
pools = [{'volumeGroupRef': 'test_vg1', 'label': 'test_vg1',
'raidLevel': 'raid6', 'securityType': 'enabled'}]
+ self.library._client = mock.Mock()
+ self.library._client.features.SSC_API_V2 = na_utils.FeatureState(
+ False, minimum_version="1.53.9000.1")
+ self.library._client.SSC_VALID_VERSIONS = [(1, 53, 9000, 1),
+ (1, 53, 9010, 15)]
self.library.configuration.netapp_storage_pools = "test_vg1"
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
self.library._client.list_drives = mock.Mock(return_value=drives)
'netapp_raid_type': 'raid6'}},
self.library._ssc_stats)
- def test_update_ssc_disk_types_ssd(self):
- drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'driveMediaType': 'ssd'}]
- pools = [{'volumeGroupRef': 'test_vg1'}]
-
- self.library._client.list_drives = mock.Mock(return_value=drives)
- self.library._client.get_storage_pool = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_types(pools)
-
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}},
- ssc_stats)
-
- def test_update_ssc_disk_types_scsi(self):
- drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'interfaceType': {'driveType': 'scsi'}}]
- pools = [{'volumeGroupRef': 'test_vg1'}]
-
- self.library._client.list_drives = mock.Mock(return_value=drives)
- self.library._client.get_storage_pool = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_types(pools)
-
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'SCSI'}},
- ssc_stats)
-
- def test_update_ssc_disk_types_fcal(self):
- drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'interfaceType': {'driveType': 'fibre'}}]
- pools = [{'volumeGroupRef': 'test_vg1'}]
-
- self.library._client.list_drives = mock.Mock(return_value=drives)
- self.library._client.get_storage_pool = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_types(pools)
-
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'FCAL'}},
- ssc_stats)
-
- def test_update_ssc_disk_types_sata(self):
- drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'interfaceType': {'driveType': 'sata'}}]
- pools = [{'volumeGroupRef': 'test_vg1'}]
-
- self.library._client.list_drives = mock.Mock(return_value=drives)
- self.library._client.get_storage_pool = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_types(pools)
+ @ddt.data(True, False)
+ def test_update_ssc_info(self, data_assurance_supported):
+ self.library._client = mock.Mock()
+ self.library._client.features.SSC_API_V2 = na_utils.FeatureState(
+ True, minimum_version="1.53.9000.1")
+ self.library._client.list_ssc_storage_pools = mock.Mock(
+ return_value=eseries_fake.SSC_POOLS)
+ self.library._get_storage_pools = mock.Mock(
+ return_value=eseries_fake.STORAGE_POOLS)
+ # Data Assurance is not supported on some storage backends
+ self.library._is_data_assurance_supported = mock.Mock(
+ return_value=data_assurance_supported)
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'SATA'}},
- ssc_stats)
+ self.library._update_ssc_info()
- def test_update_ssc_disk_types_sas(self):
- drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'interfaceType': {'driveType': 'sas'}}]
- pools = [{'volumeGroupRef': 'test_vg1'}]
+ for pool in eseries_fake.SSC_POOLS:
+ poolId = pool['poolId']
+
+ raid_lvl = self.library.SSC_RAID_TYPE_MAPPING.get(
+ pool['raidLevel'], 'unknown')
+
+ if pool['pool']["driveMediaType"] == 'ssd':
+ disk_type = 'SSD'
+ else:
+ disk_type = pool['pool']['drivePhysicalType']
+ disk_type = (
+ self.library.SSC_DISK_TYPE_MAPPING.get(
+ disk_type, 'unknown'))
+
+ da_enabled = pool['dataAssuranceCapable'] and (
+ data_assurance_supported)
+
+ expected = {
+ 'netapp_disk_encryption':
+ six.text_type(pool['encrypted']).lower(),
+ 'netapp_eseries_flash_read_cache':
+ six.text_type(pool['flashCacheCapable']).lower(),
+ 'netapp_eseries_data_assurance':
+ six.text_type(da_enabled).lower(),
+ 'netapp_eseries_disk_spindle_speed': pool['spindleSpeed'],
+ 'netapp_raid_type': raid_lvl,
+ 'netapp_disk_type': disk_type
+ }
+ actual = self.library._ssc_stats[poolId]
+ self.assertDictMatch(expected, actual)
- self.library._client.list_drives = mock.Mock(return_value=drives)
- self.library._client.get_storage_pool = mock.Mock(return_value=pools)
+ @ddt.data(('FC', True), ('iSCSI', False))
+ @ddt.unpack
+ def test_is_data_assurance_supported(self, backend_storage_protocol,
+ enabled):
+ self.mock_object(self.library, 'driver_protocol',
+ backend_storage_protocol)
- ssc_stats = self.library._update_ssc_disk_types(pools)
+ actual = self.library._is_data_assurance_supported()
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'SAS'}},
- ssc_stats)
+ self.assertEqual(enabled, actual)
- def test_update_ssc_disk_types_unknown(self):
+ @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage')
+ def test_update_ssc_disk_types(self, disk_type):
drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'interfaceType': {'driveType': 'unknown'}}]
+ 'interfaceType': {'driveType': disk_type}}]
pools = [{'volumeGroupRef': 'test_vg1'}]
self.library._client.list_drives = mock.Mock(return_value=drives)
ssc_stats = self.library._update_ssc_disk_types(pools)
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'unknown'}},
+ expected = self.library.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
+ self.assertEqual({'test_vg1': {'netapp_disk_type': expected}},
ssc_stats)
- def test_update_ssc_disk_types_undefined(self):
+ @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage')
+ def test_update_ssc_disk_types_ssd(self, disk_type):
drives = [{'currentVolumeGroupRef': 'test_vg1',
- 'interfaceType': {'driveType': '__UNDEFINED'}}]
+ 'driveMediaType': 'ssd', 'driveType': disk_type}]
pools = [{'volumeGroupRef': 'test_vg1'}]
self.library._client.list_drives = mock.Mock(return_value=drives)
ssc_stats = self.library._update_ssc_disk_types(pools)
- self.assertEqual({'test_vg1': {'netapp_disk_type': 'unknown'}},
- ssc_stats)
-
- def test_update_ssc_disk_encryption_SecType_enabled(self):
- pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'enabled'}]
- self.library._client.list_storage_pools = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_encryption(pools)
-
- self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'true'}},
- ssc_stats)
-
- def test_update_ssc_disk_encryption_SecType_unknown(self):
- pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'unknown'}]
- self.library._client.list_storage_pools = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_encryption(pools)
-
- self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
- ssc_stats)
-
- def test_update_ssc_disk_encryption_SecType_none(self):
- pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'}]
- self.library._client.list_storage_pools = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_encryption(pools)
-
- self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
+ self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}},
ssc_stats)
- def test_update_ssc_disk_encryption_SecType_capable(self):
- pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'capable'}]
+ @ddt.data('enabled', 'none', 'capable', 'unknown', '__UNDEFINED',
+ 'garbage')
+ def test_update_ssc_disk_encryption(self, securityType):
+ pools = [{'volumeGroupRef': 'test_vg1', 'securityType': securityType}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_encryption(pools)
- self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
+ # Convert the boolean value to a lower-case string value
+ expected = 'true' if securityType == "enabled" else 'false'
+ self.assertEqual({'test_vg1': {'netapp_disk_encryption': expected}},
ssc_stats)
- def test_update_ssc_disk_encryption_SecType_garbage(self):
- pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'garbage'}]
- self.library._client.list_storage_pools = mock.Mock(return_value=pools)
-
- ssc_stats = self.library._update_ssc_disk_encryption(pools)
-
- self.assertRaises(TypeError, 'test_vg1',
- {'netapp_disk_encryption': 'false'}, ssc_stats)
-
def test_update_ssc_disk_encryption_multiple(self):
pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'},
{'volumeGroupRef': 'test_vg2', 'securityType': 'enabled'}]
def test_create_asup(self):
self.library._client = mock.Mock()
- self.library._client.features.AUTOSUPPORT = True
+ self.library._client.features.AUTOSUPPORT = na_utils.FeatureState()
self.library._client.api_operating_mode = (
eseries_fake.FAKE_ASUP_DATA['operating-mode'])
self.library._app_version = eseries_fake.FAKE_APP_VERSION
def test_create_asup_not_supported(self):
self.library._client = mock.Mock()
- self.library._client.features.AUTOSUPPORT = False
+ self.library._client.features.AUTOSUPPORT = na_utils.FeatureState(
+ supported=False)
mock_invoke = self.mock_object(
self.library._client, 'add_autosupport_data')
mock_invoke.assert_not_called()
+@ddt.ddt
class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase):
"""Test driver when netapp_enable_multiattach is enabled.
self.library.create_volume(get_fake_volume())
self.assertTrue(self.library._client.create_volume.call_count)
+ @ddt.data(('netapp_eseries_flash_read_cache', 'flash_cache', 'true'),
+ ('netapp_eseries_flash_read_cache', 'flash_cache', 'false'),
+ ('netapp_eseries_flash_read_cache', 'flash_cache', None),
+ ('netapp_eseries_data_assurance', 'data_assurance', 'true'),
+ ('netapp_eseries_data_assurance', 'data_assurance', 'false'),
+ ('netapp_eseries_data_assurance', 'data_assurance', None),
+ ('netapp:write_cache', 'write_cache', 'true'),
+ ('netapp:write_cache', 'write_cache', 'false'),
+ ('netapp:write_cache', 'write_cache', None),
+ ('netapp:read_cache', 'read_cache', 'true'),
+ ('netapp:read_cache', 'read_cache', 'false'),
+ ('netapp:read_cache', 'read_cache', None),
+ ('netapp_eseries_flash_read_cache', 'flash_cache', 'True'),
+ ('netapp_eseries_flash_read_cache', 'flash_cache', '1'),
+ ('netapp_eseries_data_assurance', 'data_assurance', ''))
+ @ddt.unpack
+ def test_create_volume_with_extra_spec(self, spec, key, value):
+ fake_volume = get_fake_volume()
+ extra_specs = {spec: value}
+ volume = copy.deepcopy(eseries_fake.VOLUME)
+
+ self.library._client.create_volume = mock.Mock(
+ return_value=volume)
+ # Make this utility method return our extra spec
+ mocked_spec_method = self.mock_object(na_utils,
+ 'get_volume_extra_specs')
+ mocked_spec_method.return_value = extra_specs
+
+ self.library.create_volume(fake_volume)
+
+ self.assertEqual(1, self.library._client.create_volume.call_count)
+ # Ensure create_volume is called with the correct argument
+ args, kwargs = self.library._client.create_volume.call_args
+ self.assertIn(key, kwargs)
+ if(value is not None):
+ expected = na_utils.to_bool(value)
+ else:
+ expected = value
+ self.assertEqual(expected, kwargs[key])
+
def test_create_volume_too_many_volumes(self):
self.library._client.list_volumes = mock.Mock(
return_value=[eseries_fake.VOLUME for __ in
def test_add_feature_default(self):
self.features.add_feature('FEATURE_1')
- self.assertTrue(self.features.FEATURE_1)
+ self.assertTrue(self.features.FEATURE_1.supported)
self.assertIn('FEATURE_1', self.features.defined_features)
@ddt.data(True, False)
def test_add_feature(self, value):
self.features.add_feature('FEATURE_2', value)
- self.assertEqual(value, self.features.FEATURE_2)
+ self.assertEqual(value, bool(self.features.FEATURE_2))
+ self.assertEqual(value, self.features.FEATURE_2.supported)
+ self.assertEqual(None, self.features.FEATURE_2.minimum_version)
+ self.assertIn('FEATURE_2', self.features.defined_features)
+
+ @ddt.data((True, '1'), (False, 2), (False, None), (True, None))
+ @ddt.unpack
+ def test_add_feature_min_version(self, enabled, min_version):
+ self.features.add_feature('FEATURE_2', enabled,
+ min_version=min_version)
+
+ self.assertEqual(enabled, bool(self.features.FEATURE_2))
+ self.assertEqual(enabled, self.features.FEATURE_2.supported)
+ self.assertEqual(min_version, self.features.FEATURE_2.minimum_version)
self.assertIn('FEATURE_2', self.features.defined_features)
@ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,))
"""REST client specific to e-series storage service."""
ASUP_VALID_VERSION = (1, 52, 9000, 3)
+ # We need to check for both the release and the pre-release versions
+ SSC_VALID_VERSIONS = ((1, 53, 9000, 1), (1, 53, 9010, 16))
def __init__(self, scheme, host, port, service_path, username,
password, **kwargs):
api_version_tuple = tuple(int(version)
for version in self.api_version.split('.'))
- api_valid_version = self._validate_version(self.ASUP_VALID_VERSION,
- api_version_tuple)
- self.features.add_feature('AUTOSUPPORT', supported=api_valid_version)
+ asup_api_valid_version = self._validate_version(
+ self.ASUP_VALID_VERSION, api_version_tuple)
+
+ ssc_api_valid_version = any(self._validate_version(valid_version,
+ api_version_tuple)
+ for valid_version
+ in self.SSC_VALID_VERSIONS)
+
+ self.features.add_feature('AUTOSUPPORT',
+ supported=asup_api_valid_version,
+ min_version=self._version_tuple_to_str(
+ self.ASUP_VALID_VERSION))
+ self.features.add_feature('SSC_API_V2',
+ supported=ssc_api_valid_version,
+ min_version=self._version_tuple_to_str(
+ self.SSC_VALID_VERSIONS[0]))
+
+ def _version_tuple_to_str(self, version):
+ return ".".join([str(part) for part in version])
def _validate_version(self, version, actual_version):
"""Determine if version is newer than, or equal to the actual version
msg = _("Response error code - %s.") % status_code
raise exception.NetAppDriverException(msg)
- def create_volume(self, pool, label, size, unit='gb', seg_size=0):
- """Creates volume on array."""
- path = "/storage-systems/{system-id}/volumes"
- data = {'poolId': pool, 'name': label, 'sizeUnit': unit,
- 'size': int(size), 'segSize': seg_size}
+ def create_volume(self, pool, label, size, unit='gb', seg_size=0,
+ read_cache=None, write_cache=None, flash_cache=None,
+ data_assurance=None):
+ """Creates a volume on array with the configured attributes
+
+ Note: if read_cache, write_cache, flash_cache, or data_assurance
+ are not provided, the default will be utilized by the Webservice.
+
+ :param pool: The pool unique identifier
+    :param label: The unique label for the volume
+ :param size: The capacity in units
+ :param unit: The unit for capacity
+ :param seg_size: The segment size for the volume, expressed in KB.
+ Default will allow the Webservice to choose.
+ :param read_cache: If true, enable read caching, if false,
+ explicitly disable it.
+ :param write_cache: If true, enable write caching, if false,
+ explicitly disable it.
+ :param flash_cache: If true, add the volume to a Flash Cache
+ :param data_assurance: If true, enable the Data Assurance capability
+    :return: The created volume
+ """
+
+ # Utilize the new API if it is available
+ if self.features.SSC_API_V2:
+ path = "/storage-systems/{system-id}/ssc/volumes"
+ data = {'poolId': pool, 'name': label, 'sizeUnit': unit,
+ 'size': int(size), 'dataAssuranceEnable': data_assurance,
+ 'flashCacheEnable': flash_cache,
+ 'readCacheEnable': read_cache,
+ 'writeCacheEnable': write_cache}
+ # Use the old API
+ else:
+            # Determine if there were extra specs provided that are not
+ # supported
+ extra_specs = [read_cache, write_cache]
+ unsupported_spec = any([spec is not None for spec in extra_specs])
+ if(unsupported_spec):
+ msg = _("E-series proxy API version %(current_version)s does "
+ "not support full set of SSC extra specs. The proxy"
+                        " version must be at least %(min_version)s.")
+ min_version = self.features.SSC_API_V2.minimum_version
+ raise exception.NetAppDriverException(msg %
+ {'current_version':
+ self.api_version,
+ 'min_version':
+ min_version})
+
+ path = "/storage-systems/{system-id}/volumes"
+ data = {'poolId': pool, 'name': label, 'sizeUnit': unit,
+ 'size': int(size), 'segSize': seg_size}
return self._invoke('POST', path, data)
def delete_volume(self, object_id):
    """Issue a DELETE for the given object on the array.

    NOTE(review): the endpoint targets snapshot-volumes despite the
    method name -- confirm against the surrounding file.
    """
    target = "/storage-systems/{system-id}/snapshot-volumes/{object-id}"
    return self._invoke('DELETE', target, **{'object-id': object_id})
def list_ssc_storage_pools(self):
    """Lists pools and their service quality defined on the array."""
    return self._invoke('GET', "/storage-systems/{system-id}/ssc/pools")
+
def get_ssc_storage_pool(self, volume_group_ref):
    """Get storage pool service quality information from the array."""
    endpoint = "/storage-systems/{system-id}/ssc/pools/{object-id}"
    ids = {'object-id': volume_group_ref}
    return self._invoke('GET', endpoint, **ids)
+
def list_storage_pools(self):
"""Lists storage pools in the array."""
path = "/storage-systems/{system-id}/storage-pools"
'fibre': 'FCAL',
'sas': 'SAS',
'sata': 'SATA',
+ 'ssd': 'SSD',
}
# Maps array-reported RAID levels to the values advertised through the
# storage service catalog (see _update_ssc_info_v2 / _update_ssc_raid_type).
SSC_RAID_TYPE_MAPPING = {
    'raidDiskPool': 'DDP',
    'raid5': 'raid5',
    'raid6': 'raid6',
}
# Qualified ("netapp:"-prefixed) extra specs read at volume-create time
# to drive per-volume cache settings (see _create_volume).
READ_CACHE_Q_SPEC = 'netapp:read_cache'
WRITE_CACHE_Q_SPEC = 'netapp:write_cache'
# Unqualified extra-spec keys: read at create time (DA/flash cache) and
# published per pool by the SSC update methods.
DA_UQ_SPEC = 'netapp_eseries_data_assurance'
FLASH_CACHE_UQ_SPEC = 'netapp_eseries_flash_read_cache'
DISK_TYPE_UQ_SPEC = 'netapp_disk_type'
ENCRYPTION_UQ_SPEC = 'netapp_disk_encryption'
SPINDLE_SPD_UQ_SPEC = 'netapp_eseries_disk_spindle_speed'
RAID_UQ_SPEC = 'netapp_raid_type'
SSC_UPDATE_INTERVAL = 60 # seconds
WORLDWIDENAME = 'worldWideName'
eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
+ extra_specs = na_utils.get_volume_extra_specs(volume)
+
# get size of the requested volume creation
size_gb = int(volume['size'])
- self._create_volume(eseries_pool_label,
- eseries_volume_label,
- size_gb)
+ self._create_volume(eseries_pool_label, eseries_volume_label, size_gb,
+ extra_specs)
def _create_volume(self, eseries_pool_label, eseries_volume_label,
                   size_gb, extra_specs=None):
    """Creates volume with given label and size.

    :param eseries_pool_label: label of the backend pool to place the
        volume in
    :param eseries_volume_label: E-series-formatted label for the volume
    :param size_gb: requested capacity in GiB
    :param extra_specs: optional dict of volume-type extra specs; cache
        and data-assurance settings are derived from it. Defaults to {}.
    """
    if extra_specs is None:
        extra_specs = {}
    if self.configuration.netapp_enable_multiattach:
        volumes = self._client.list_volumes()
        # NOTE(review): unchanged context lines are elided from this
        # view between the volume listing above and the error below.
        {'req': utils.MAX_LUNS_PER_HOST_GROUP})
        raise exception.NetAppDriverException(msg)
    # These must be either boolean values, or None
    read_cache = extra_specs.get(self.READ_CACHE_Q_SPEC)
    if read_cache is not None:
        read_cache = na_utils.to_bool(read_cache)

    write_cache = extra_specs.get(self.WRITE_CACHE_Q_SPEC)
    if write_cache is not None:
        write_cache = na_utils.to_bool(write_cache)

    flash_cache = extra_specs.get(self.FLASH_CACHE_UQ_SPEC)
    if flash_cache is not None:
        flash_cache = na_utils.to_bool(flash_cache)

    data_assurance = extra_specs.get(self.DA_UQ_SPEC)
    if data_assurance is not None:
        data_assurance = na_utils.to_bool(data_assurance)

    target_pool = None
    pools = self._get_storage_pools()
    # NOTE(review): the pool-selection loop is elided from this view.
    try:
        # Pass the derived settings through; None lets the Webservice
        # apply its defaults (see client create_volume docstring).
        vol = self._client.create_volume(target_pool['volumeGroupRef'],
                                         eseries_volume_label, size_gb,
                                         read_cache=read_cache,
                                         write_cache=write_cache,
                                         flash_cache=flash_cache,
                                         data_assurance=data_assurance)
        LOG.info(_LI("Created volume with "
                     "label %s."), eseries_volume_label)
    except exception.NetAppDriverException as e:  # handler body elided
    return vol
+ def _is_data_assurance_supported(self):
+ """Determine if the storage backend is PI (DataAssurance) compatible"""
+ return self.driver_protocol != "iSCSI"
+
def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_available_storage_pools(size_gb)
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'"), self._backend_name)
+
relevant_pools = self._get_storage_pools()
+
+ if self._client.features.SSC_API_V2:
+ self._update_ssc_info_v2(relevant_pools)
+ else:
+ self._update_ssc_info_v1(relevant_pools)
+
def _update_ssc_info_v1(self, relevant_pools):
    """Update ssc data using the legacy API

    :param relevant_pools: The pools that this driver cares about
    """
    # Tell operators that only the reduced (pre-v2) spec set is reported.
    msg = _LI("E-series proxy API version %(version)s does not "
              "support full set of SSC extra specs. The proxy version"
              " must be at at least %(min_version)s.")
    LOG.info(msg, {'version': self._client.api_version,
                   'min_version':
                   self._client.features.SSC_API_V2.minimum_version})

    self._ssc_stats = (
        self._update_ssc_disk_encryption(relevant_pools))
    self._ssc_stats = (
        # NOTE(review): one update call is elided from this view.
    self._ssc_stats = (
        self._update_ssc_raid_type(relevant_pools))
def _update_ssc_info_v2(self, relevant_pools):
    """Refresh SSC data for the given pools via the SSC pools endpoint.

    :param relevant_pools: The pools that this driver cares about
    """
    stats = copy.deepcopy(self._ssc_stats)
    relevant_labels = set(pool['label'] for pool in relevant_pools)

    for ssc_pool in self._client.list_ssc_storage_pools():
        # Skip pools this backend is not configured to use.
        if ssc_pool['name'] not in relevant_labels:
            continue

        pool_info = stats.setdefault(ssc_pool['poolId'], {})

        pool_info[self.ENCRYPTION_UQ_SPEC] = six.text_type(
            ssc_pool['encrypted']).lower()

        pool_info[self.SPINDLE_SPD_UQ_SPEC] = ssc_pool['spindleSpeed']

        pool_info[self.FLASH_CACHE_UQ_SPEC] = six.text_type(
            ssc_pool['flashCacheCapable']).lower()

        # Data Assurance is not compatible with some backend types
        da_capable = (ssc_pool['dataAssuranceCapable'] and
                      self._is_data_assurance_supported())
        pool_info[self.DA_UQ_SPEC] = six.text_type(da_capable).lower()

        pool_info[self.RAID_UQ_SPEC] = self.SSC_RAID_TYPE_MAPPING.get(
            ssc_pool['raidLevel'], 'unknown')

        if ssc_pool['pool'].get("driveMediaType") == 'ssd':
            pool_info[self.DISK_TYPE_UQ_SPEC] = 'SSD'
        else:
            pool_info[self.DISK_TYPE_UQ_SPEC] = (
                self.SSC_DISK_TYPE_MAPPING.get(
                    ssc_pool['pool'].get('drivePhysicalType'), 'unknown'))

    self._ssc_stats = stats
+
def _update_ssc_disk_types(self, storage_pools):
    """Updates the given ssc dictionary with new disk type information.

    :param storage_pools: the pools whose drives are inspected (the
        drive-iteration setup is elided from this view)
    """
    # NOTE(review): loop setup and pool-membership check elided here.
        ssc_stats[current_vol_group] = {}
        if drive.get("driveMediaType") == 'ssd':
            # SSDs report a media type rather than an interface type.
            ssc_stats[current_vol_group][self.DISK_TYPE_UQ_SPEC] = 'SSD'
        else:
            disk_type = drive.get('interfaceType').get('driveType')
            ssc_stats[current_vol_group][self.DISK_TYPE_UQ_SPEC] = (
                self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown'))
    return ssc_stats
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
- ssc_stats[current_vol_group]['netapp_disk_encryption'] = 'true' \
- if pool['securityType'] == 'enabled' else 'false'
+ ssc_stats[current_vol_group][self.ENCRYPTION_UQ_SPEC] = (
+ six.text_type(pool['securityType'] == 'enabled').lower()
+ )
return ssc_stats
def __init__(self):
    # Names of every feature registered via add_feature().
    self.defined_features = set()
def add_feature(self, name, supported=True, min_version=None):
    """Register *name* as a feature with the given enablement state."""
    if isinstance(supported, bool):
        self.defined_features.add(name)
        # Store a FeatureState so callers can test truthiness and
        # inspect the minimum supporting version.
        setattr(self, name, FeatureState(supported, min_version))
    else:
        raise TypeError("Feature value must be a bool type.")
def __getattr__(self, name):
    """Raise AttributeError for any feature name that was never defined."""
    # NOTE(cknight): Needed to keep pylint happy.
    raise AttributeError
+
+
class FeatureState(object):

    def __init__(self, supported=True, minimum_version=None):
        """Represents the current state of enablement for a Feature

        :param supported: True if supported, false otherwise
        :param minimum_version: The minimum version that this feature is
               supported at
        """
        self.supported = supported
        self.minimum_version = minimum_version

    def __nonzero__(self):
        """Allow a FeatureState object to be tested for truth value

        :return True if the feature is supported, otherwise False
        """
        return self.supported

    # Python 3 looks up __bool__, not __nonzero__; without this alias a
    # FeatureState would always be truthy on Python 3 and checks such as
    # `if self.features.SSC_API_V2:` would take the wrong branch.
    __bool__ = __nonzero__