"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "1073741824", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
- "listOfMappings": [], "sectorOffset": "15",
+ "listOfMappings": [{
+ "lunMappingRef":"8800000000000000000000000000000000000000",
+ "lun": 0,
+ "ssid": 16384,
+ "perms": 15,
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "type": "all",
+ "mapRef": "8400000060080E500023C73400300381515BFBA3"
+ }], "sectorOffset": "15",
"id": "0200000060080E500023BB34000003FB515C2293",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "1073741824", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
- "listOfMappings": [], "sectorOffset": "15",
+ "listOfMappings": [{
+ "lunMappingRef":"8800000000000000000000000000000000000000",
+ "lun": 0,
+ "ssid": 16384,
+ "perms": 15,
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "type": "all",
+ "mapRef": "8400000060080E500023C73400300381515BFBA3"
+ }], "sectorOffset": "15",
"id": "0200000060080E500023BB34000003FB515C2293",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "1073741824", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
- "listOfMappings": [], "sectorOffset": "15",
+ "listOfMappings": [{
+ "lunMappingRef":"8800000000000000000000000000000000000000",
+ "lun": 0,
+ "ssid": 16384,
+ "perms": 15,
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "type": "all",
+ "mapRef": "8400000060080E500023C73400300381515BFBA3"
+ }], "sectorOffset": "15",
"id": "0200000060080E500023BB34000003FB515C2293",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
configuration.netapp_login = 'rw'
configuration.netapp_password = 'rw'
configuration.netapp_storage_pools = 'DDP'
+ configuration.netapp_enable_multiattach = False
return configuration
def test_embedded_mode(self):
self.driver.delete_volume(self.volume)
self.assertEqual(1, self.driver.db.volume_get.call_count)
- def test_map_unmap(self):
- self.driver.create_volume(self.volume)
- connection_info = self.driver.initialize_connection(self.volume,
- self.connector)
- self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
- properties = connection_info.get('data')
- self.assertIsNotNone(properties, 'Target portal is none')
- self.driver.terminate_connection(self.volume, self.connector)
- self.driver.delete_volume(self.volume)
-
- def test_map_already_mapped_same_host(self):
- self.driver.create_volume(self.volume)
-
- maps = [{'lunMappingRef': 'hdkjsdhjsdh',
- 'mapRef': '8400000060080E500023C73400300381515BFBA3',
- 'volumeRef': '0200000060080E500023BB34000003FB515C2293',
- 'lun': 2}]
- self.driver._get_host_mapping_for_vol_frm_array = mock.Mock(
- return_value=maps)
- self.driver._get_free_lun = mock.Mock()
- info = self.driver.initialize_connection(self.volume, self.connector)
- self.assertEqual(
- self.driver._get_host_mapping_for_vol_frm_array.call_count, 1)
- self.assertEqual(self.driver._get_free_lun.call_count, 0)
- self.assertEqual(info['driver_volume_type'], 'iscsi')
- properties = info.get('data')
- self.assertIsNotNone(properties, 'Target portal is none')
- self.driver.delete_volume(self.volume)
-
- def test_map_already_mapped_diff_host(self):
- self.driver.create_volume(self.volume)
-
- maps = [{'lunMappingRef': 'hdkjsdhjsdh',
- 'mapRef': '7400000060080E500023C73400300381515BFBA3',
- 'volumeRef': 'CFDXJ67BLJH25DXCZFZD4NSF54',
- 'lun': 2}]
- self.driver._get_host_mapping_for_vol_frm_array = mock.Mock(
- return_value=maps)
- self.driver._get_vol_mapping_for_host_frm_array = mock.Mock(
- return_value=[])
- self.driver._get_free_lun = mock.Mock(return_value=0)
- self.driver._del_vol_mapping_frm_cache = mock.Mock()
- info = self.driver.initialize_connection(self.volume, self.connector)
- self.assertEqual(
- self.driver._get_vol_mapping_for_host_frm_array.call_count, 1)
- self.assertEqual(
- self.driver._get_host_mapping_for_vol_frm_array.call_count, 1)
- self.assertEqual(self.driver._get_free_lun.call_count, 1)
- self.assertEqual(self.driver._del_vol_mapping_frm_cache.call_count, 1)
- self.assertEqual(info['driver_volume_type'], 'iscsi')
- properties = info.get('data')
- self.assertIsNotNone(properties, 'Target portal is none')
- self.driver.terminate_connection(self.volume, self.connector)
- self.driver.delete_volume(self.volume)
-
def test_cloned_volume_destroy(self):
self.driver.db = mock.Mock(
volume_get=mock.Mock(return_value=self.volume))
self.assertEqual(1, self.driver.db.volume_get.call_count)
self.driver.delete_volume(self.volume)
- def test_map_by_creating_host(self):
- self.driver.create_volume(self.volume)
- connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'}
- connection_info = self.driver.initialize_connection(self.volume,
- connector_new)
- self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
- properties = connection_info.get('data')
- self.assertIsNotNone(properties, 'Target portal is none')
-
def test_vol_stats(self):
self.driver.get_volume_stats(refresh=False)
self.driver._get_iscsi_portal_for_vol,
vol_nomatch, portals, False)
- def test_get_host_right_type(self):
- self.driver._get_host_with_port = mock.Mock(
- return_value={'hostTypeIndex': 2, 'name': 'test'})
- self.driver._get_host_type_definition = mock.Mock(
- return_value={'index': 2, 'name': 'LnxALUA'})
- host = self.driver._get_or_create_host('port', 'LinuxALUA')
- self.assertEqual(host, {'hostTypeIndex': 2, 'name': 'test'})
- self.driver._get_host_with_port.assert_called_once_with('port')
- self.driver._get_host_type_definition.assert_called_once_with(
- 'LinuxALUA')
-
- def test_get_host_update_type(self):
- self.driver._get_host_with_port = mock.Mock(
- return_value={'hostTypeIndex': 2, 'hostRef': 'test'})
- self.driver._get_host_type_definition = mock.Mock(
- return_value={'index': 3, 'name': 'LnxALUA'})
- self.driver._client.update_host_type = mock.Mock(
- return_value={'hostTypeIndex': 3, 'hostRef': 'test'})
- host = self.driver._get_or_create_host('port', 'LinuxALUA')
- self.assertEqual(host, {'hostTypeIndex': 3, 'hostRef': 'test'})
- self.driver._get_host_with_port.assert_called_once_with('port')
- self.driver._get_host_type_definition.assert_called_once_with(
- 'LinuxALUA')
- self.assertEqual(self.driver._client.update_host_type.call_count, 1)
-
- def test_get_host_update_type_failed(self):
- self.driver._get_host_with_port = mock.Mock(
- return_value={'hostTypeIndex': 2, 'hostRef': 'test',
- 'label': 'test'})
- self.driver._get_host_type_definition = mock.Mock(
- return_value={'index': 3, 'name': 'LnxALUA'})
- self.driver._client.update_host_type = mock.Mock(
- side_effect=exception.NetAppDriverException)
- host = self.driver._get_or_create_host('port', 'LinuxALUA')
- self.assertEqual(host, {'hostTypeIndex': 2, 'hostRef': 'test',
- 'label': 'test'})
- self.driver._get_host_with_port.assert_called_once_with('port')
- self.driver._get_host_type_definition.assert_called_once_with(
- 'LinuxALUA')
- self.assertEqual(self.driver._client.update_host_type.call_count, 1)
-
- def test_get_host_not_found(self):
- self.driver._get_host_with_port = mock.Mock(
- side_effect=exception.NotFound)
- self.driver._create_host = mock.Mock()
- self.driver._get_or_create_host('port', 'LnxALUA')
- self.driver._get_host_with_port.assert_called_once_with('port')
- self.driver._create_host.assert_called_once_with('port', 'LnxALUA')
-
def test_setup_error_unsupported_host_type(self):
configuration = self._set_config(create_configuration())
configuration.netapp_host_type = 'garbage'
--- /dev/null
+# Copyright (c) 2015 Alex Meade. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import copy
+
+import mock
+
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.netapp.eseries import utils
+import cinder.volume.drivers.netapp.options as na_opts
+
+
+MULTIATTACH_HOST_GROUP = {
+ 'clusterRef': '8500000060080E500023C7340036035F515B78FC',
+ 'label': utils.MULTI_ATTACH_HOST_GROUP_NAME,
+}
+
+FOREIGN_HOST_GROUP = {
+ 'clusterRef': '8500000060080E500023C7340036035F515B78FD',
+ 'label': 'FOREIGN HOST GROUP',
+}
+
+STORAGE_POOL = {
+ 'label': 'DDP',
+ 'volumeGroupRef': 'fakevolgroupref',
+}
+
+VOLUME = {
+ 'extremeProtection': False,
+ 'pitBaseVolume': True,
+ 'dssMaxSegmentSize': 131072,
+ 'totalSizeInBytes': '1073741824',
+ 'raidLevel': 'raid6',
+ 'volumeRef': '0200000060080E500023BB34000003FB515C2293',
+ 'listOfMappings': [],
+ 'sectorOffset': '15',
+ 'id': '0200000060080E500023BB34000003FB515C2293',
+ 'wwn': '60080E500023BB3400001FC352D14CB2',
+ 'capacity': '2147483648',
+ 'mgmtClientAttribute': 0,
+ 'label': 'CFDXJ67BLJH25DXCZFZD4NSF54',
+ 'volumeFull': False,
+ 'blkSize': 512,
+ 'volumeCopyTarget': False,
+ 'volumeGroupRef': '0400000060080E500023BB3400001F9F52CECC3F',
+ 'preferredControllerId': '070000000000000000000001',
+ 'currentManager': '070000000000000000000001',
+ 'applicationTagOwned': False,
+ 'status': 'optimal',
+ 'segmentSize': 131072,
+ 'volumeUse': 'standardVolume',
+ 'action': 'none',
+ 'preferredManager': '070000000000000000000001',
+ 'volumeHandle': 15,
+ 'offline': False,
+ 'preReadRedundancyCheckEnabled': False,
+ 'dssPreallocEnabled': False,
+ 'name': 'bdm-vc-test-1',
+ 'worldWideName': '60080E500023BB3400001FC352D14CB2',
+ 'currentControllerId': '070000000000000000000001',
+ 'protectionInformationCapable': False,
+ 'mapped': False,
+ 'reconPriority': 1,
+ 'protectionType': 'type1Protection'
+}
+
+INITIATOR_NAME = 'iqn.1998-01.com.vmware:localhost-28a58148'
+INITIATOR_NAME_2 = 'iqn.1998-01.com.vmware:localhost-28a58149'
+
+HOST = {
+ 'isSAControlled': False,
+ 'confirmLUNMappingCreation': False,
+ 'label': 'stlrx300s7-55',
+ 'isLargeBlockFormatHost': False,
+ 'clusterRef': '8500000060080E500023C7340036035F515B78FC',
+ 'protectionInformationCapableAccessMethod': False,
+ 'ports': [],
+ 'hostRef': '8400000060080E500023C73400300381515BFBA3',
+ 'hostTypeIndex': 6,
+ 'hostSidePorts': [{
+ 'label': 'NewStore',
+ 'type': 'iscsi',
+ 'address': INITIATOR_NAME}]
+}
+HOST_2 = {
+ 'isSAControlled': False,
+ 'confirmLUNMappingCreation': False,
+ 'label': 'stlrx300s7-55',
+ 'isLargeBlockFormatHost': False,
+ 'clusterRef': utils.NULL_REF,
+ 'protectionInformationCapableAccessMethod': False,
+ 'ports': [],
+ 'hostRef': '8400000060080E500023C73400300381515BFBA5',
+ 'hostTypeIndex': 6,
+ 'hostSidePorts': [{
+ 'label': 'NewStore', 'type': 'iscsi',
+ 'address': INITIATOR_NAME_2}]
+}
+
+VOLUME_MAPPING = {
+ 'lunMappingRef': '8800000000000000000000000000000000000000',
+ 'lun': 0,
+ 'ssid': 16384,
+ 'perms': 15,
+ 'volumeRef': VOLUME['volumeRef'],
+ 'type': 'all',
+ 'mapRef': HOST['hostRef']
+}
+
+VOLUME_MAPPING_TO_MULTIATTACH_GROUP = copy.deepcopy(VOLUME_MAPPING)
+VOLUME_MAPPING_TO_MULTIATTACH_GROUP.update(
+ {'mapRef': MULTIATTACH_HOST_GROUP['clusterRef']}
+)
+
+STORAGE_SYSTEM = {
+ 'freePoolSpace': 11142431623168,
+ 'driveCount': 24,
+    'hostSparesUsed': 0,
+    'id': '1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b',
+    'hotSpareSizeAsString': '0',
+    'wwn': '60080E500023C73400000000515AF323',
+    'parameters': {
+        'minVolSize': 1048576,
+        'maxSnapshotsPerBase': 16,
+        'maxDrives': 192,
+        'maxVolumes': 512,
+        'maxVolumesPerGroup': 256,
+        'maxMirrors': 0,
+        'maxMappingsPerVolume': 1,
+        'maxMappableLuns': 256,
+        'maxVolCopys': 511,
+        'maxSnapshots': 256
+    },
+    'hotSpareCount': 0,
+ 'hostSpareCountInStandby': 0,
+ 'status': 'needsattn',
+ 'trayCount': 1,
+ 'usedPoolSpaceAsString': '5313000380416',
+ 'ip2': '10.63.165.216',
+ 'ip1': '10.63.165.215',
+ 'freePoolSpaceAsString': '11142431623168',
+ 'types': 'SAS',
+ 'name': 'stle2600-7_8',
+ 'hotSpareSize': 0,
+ 'usedPoolSpace': 5313000380416,
+ 'driveTypes': ['sas'],
+ 'unconfiguredSpaceByDriveType': {},
+ 'unconfiguredSpaceAsStrings': '0',
+ 'model': '2650',
+ 'unconfiguredSpace': 0
+}
+
+SNAPSHOT_GROUP = {
+ 'status': 'optimal',
+ 'autoDeleteLimit': 0,
+ 'maxRepositoryCapacity': '-65536',
+ 'rollbackStatus': 'none',
+ 'unusableRepositoryCapacity': '0',
+    'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A',
+ 'clusterSize': 65536,
+ 'label': 'C6JICISVHNG2TFZX4XB5ZWL7O',
+ 'maxBaseCapacity': '476187142128128',
+ 'repositoryVolume': '3600000060080E500023BB3400001FA952CEF12C',
+ 'fullWarnThreshold': 99,
+ 'repFullPolicy': 'purgepit',
+ 'action': 'none',
+ 'rollbackPriority': 'medium',
+ 'creationPendingStatus': 'none',
+ 'consistencyGroupRef': '0000000000000000000000000000000000000000',
+ 'volumeHandle': 49153,
+ 'consistencyGroup': False,
+ 'baseVolume': '0200000060080E500023C734000009825294A534'
+}
+
+SNAPSHOT_IMAGE = {
+ 'status': 'optimal',
+ 'pitCapacity': '2147483648',
+ 'pitTimestamp': '1389315375',
+ 'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A',
+ 'creationMethod': 'user',
+ 'repositoryCapacityUtilization': '2818048',
+ 'activeCOW': True,
+ 'isRollbackSource': False,
+ 'pitRef': '3400000060080E500023BB3400631F335294A5A8',
+ 'pitSequenceNumber': '19'
+}
+
+HARDWARE_INVENTORY = {
+ 'iscsiPorts': [
+ {
+            'controllerId': '070000000000000000000002',
+ 'ipv4Enabled': True,
+ 'ipv4Data': {
+ 'ipv4Address': '0.0.0.0',
+                'ipv4AddressConfigMethod': 'configStatic',
+ 'ipv4VlanId': {
+ 'isEnabled': False,
+ 'value': 0
+ },
+ 'ipv4AddressData': {
+ 'ipv4Address': '172.20.123.66',
+ 'ipv4SubnetMask': '255.255.255.0',
+ 'configState': 'configured',
+ 'ipv4GatewayAddress': '0.0.0.0'
+ }
+ },
+ 'tcpListenPort': 3260,
+ 'interfaceRef': '2202040000000000000000000000000000000000',
+ 'iqn': 'iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323'
+ }
+ ]
+}
+
+
+def create_configuration_eseries():
+ config = conf.Configuration(None)
+ config.append_config_values(na_opts.netapp_connection_opts)
+ config.append_config_values(na_opts.netapp_transport_opts)
+ config.append_config_values(na_opts.netapp_basicauth_opts)
+ config.append_config_values(na_opts.netapp_provisioning_opts)
+ config.append_config_values(na_opts.netapp_eseries_opts)
+ config.netapp_storage_protocol = 'iscsi'
+ config.netapp_login = 'rw'
+ config.netapp_password = 'rw'
+ config.netapp_server_hostname = '127.0.0.1'
+ config.netapp_transport_type = 'http'
+ config.netapp_server_port = '8080'
+ config.netapp_storage_pools = 'DDP'
+ config.netapp_storage_family = 'eseries'
+ config.netapp_sa_password = 'saPass'
+ config.netapp_controller_ips = '10.11.12.13,10.11.12.14'
+ config.netapp_webservice_path = '/devmgr/v2'
+ config.netapp_enable_multiattach = False
+ return config
+
+
+def deepcopy_return_value_method_decorator(fn):
+ '''Returns a deepcopy of the returned value of the wrapped function.'''
+ def decorator(*args, **kwargs):
+ return copy.deepcopy(fn(*args, **kwargs))
+
+ return decorator
+
+
+def deepcopy_return_value_class_decorator(cls):
+ '''Wraps all 'non-protected' methods of a class with the
+ deepcopy_return_value_method_decorator decorator.
+ '''
+ class NewClass(cls):
+ def __getattribute__(self, attr_name):
+ obj = super(NewClass, self).__getattribute__(attr_name)
+ if (hasattr(obj, '__call__') and not attr_name.startswith('_')
+ and not isinstance(obj, mock.Mock)):
+ return deepcopy_return_value_method_decorator(obj)
+ return obj
+
+ return NewClass
+
+
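+# FakeEseriesClient below is wrapped so that every public method returns a
+# deep copy of the module-level fake dicts above; tests can then mutate what
+# they get back without corrupting the shared fixtures.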
+@deepcopy_return_value_class_decorator
+class FakeEseriesClient(object):
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def list_storage_pools(self):
+ return [STORAGE_POOL]
+
+ def register_storage_system(self, *args, **kwargs):
+ return {
+ 'freePoolSpace': '17055871480319',
+ 'driveCount': 24,
+ 'wwn': '60080E500023C73400000000515AF323',
+ 'id': '1',
+ 'hotSpareSizeAsString': '0',
+ 'hostSparesUsed': 0,
+ 'types': '',
+ 'hostSpareCountInStandby': 0,
+ 'status': 'optimal',
+ 'trayCount': 1,
+ 'usedPoolSpaceAsString': '37452115456',
+ 'ip2': '10.63.165.216',
+ 'ip1': '10.63.165.215',
+ 'freePoolSpaceAsString': '17055871480319',
+ 'hotSpareCount': 0,
+ 'hotSpareSize': '0',
+ 'name': 'stle2600-7_8',
+ 'usedPoolSpace': '37452115456',
+ 'driveTypes': ['sas'],
+ 'unconfiguredSpaceByDriveType': {},
+ 'unconfiguredSpaceAsStrings': '0',
+ 'model': '2650',
+ 'unconfiguredSpace': '0'
+ }
+
+ def list_volumes(self):
+ return [VOLUME]
+
+ def create_host_group(self, name):
+ return MULTIATTACH_HOST_GROUP
+
+ def get_host_group(self, ref):
+ return MULTIATTACH_HOST_GROUP
+
+ def list_host_groups(self):
+ return [MULTIATTACH_HOST_GROUP]
+
+ def get_host_group_by_name(self, name, *args, **kwargs):
+ host_groups = self.list_host_groups()
+ return [host_group for host_group in host_groups
+ if host_group['label'] == name][0]
+
+ def set_host_group_for_host(self, *args, **kwargs):
+ pass
+
+ def create_host_with_port(self, *args, **kwargs):
+ return HOST
+
+ def list_hosts(self):
+ return [HOST, HOST_2]
+
+ def get_host(self, *args, **kwargs):
+ return HOST
+
+ def create_volume_mapping(self, *args, **kwargs):
+ return VOLUME_MAPPING
+
+ def get_volume_mappings(self):
+ return [VOLUME_MAPPING]
+
+ def delete_volume_mapping(self):
+ return
+
+ def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id):
+ return {'lun': lun_id}
+
+ def list_storage_system(self):
+ return STORAGE_SYSTEM
+
+ def list_storage_systems(self):
+ return [STORAGE_SYSTEM]
+
+ def list_snapshot_groups(self):
+ return [SNAPSHOT_GROUP]
+
+ def list_snapshot_images(self):
+ return [SNAPSHOT_IMAGE]
+
+ def list_host_types(self):
+ return [
+ {
+ 'id': '4',
+ 'code': 'AIX',
+ 'name': 'AIX',
+ 'index': 4
+ },
+ {
+ 'id': '5',
+ 'code': 'IRX',
+ 'name': 'IRX',
+ 'index': 5
+ },
+ {
+ 'id': '6',
+ 'code': 'LnxALUA',
+ 'name': 'LnxALUA',
+ 'index': 6
+ }
+ ]
+
+ def list_hardware_inventory(self):
+ return HARDWARE_INVENTORY
--- /dev/null
+# Copyright (c) 2015 Alex Meade. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Mock unit tests for the NetApp E-series iscsi driver
+"""
+
+import copy
+
+import mock
+import six
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
+ eseries_fakes
+from cinder.volume.drivers.netapp.eseries import host_mapper
+from cinder.volume.drivers.netapp.eseries import utils
+
+
+def get_fake_volume():
+ return {
+ 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
+ 'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
+ 'provider_auth': 'provider a b', 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None, 'migration_status': None, 'attach_status':
+ "detached", "status": "available"
+ }
+
+
+class NetAppEseriesHostMapperTestCase(test.TestCase):
+ def setUp(self):
+ super(NetAppEseriesHostMapperTestCase, self).setUp()
+
+ self.client = eseries_fakes.FakeEseriesClient()
+
+ def test_get_host_mapping_for_vol_frm_array(self):
+ volume_mapping_1 = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ volume_mapping_2 = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ volume_mapping_2['volumeRef'] = '2'
+ self.mock_object(self.client, 'get_volume_mappings',
+ mock.Mock(return_value=[volume_mapping_1,
+ volume_mapping_2]))
+ mappings = host_mapper.get_host_mapping_for_vol_frm_array(
+ self.client, eseries_fakes.VOLUME)
+
+ self.assertEqual([volume_mapping_1], mappings)
+
+ def test_unmap_volume_from_host_volume_mapped_to_host(self):
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_eseries_volume['listOfMappings'] = [
+ eseries_fakes.VOLUME_MAPPING
+ ]
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ self.mock_object(self.client, 'delete_volume_mapping')
+
+ host_mapper.unmap_volume_from_host(self.client, get_fake_volume(),
+ eseries_fakes.HOST,
+ eseries_fakes.VOLUME_MAPPING)
+
+ self.assertTrue(self.client.delete_volume_mapping.called)
+
+ def test_unmap_volume_from_host_volume_mapped_to_different_host(self):
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ # Mapped to host 1
+ fake_eseries_volume['listOfMappings'] = [
+ eseries_fakes.VOLUME_MAPPING
+ ]
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ self.mock_object(self.client, 'delete_volume_mapping')
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(
+ side_effect=exception.NotFound))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.unmap_volume_from_host,
+ self.client, get_fake_volume(),
+ eseries_fakes.HOST_2,
+ eseries_fakes.VOLUME_MAPPING)
+ self.assertIn("not currently mapped to host", six.text_type(err))
+
+ def test_unmap_volume_from_host_volume_mapped_to_host_group_but_not_host(
+ self):
+ """Ensure an error is raised if the specified host is not in the
+ host group the volume is mapped to.
+ """
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[
+ 'clusterRef']
+ fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ fake_host = copy.deepcopy(eseries_fakes.HOST)
+ fake_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'list_hosts',
+ mock.Mock(return_value=[fake_host]))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.unmap_volume_from_host,
+ self.client, get_fake_volume(),
+ fake_host,
+ fake_volume_mapping)
+ self.assertIn("not currently mapped to host", six.text_type(err))
+
+ def test_unmap_volume_from_host_volume_mapped_to_multiattach_host_group(
+ self):
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[
+ 'clusterRef']
+ fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
+ self.mock_object(self.client, 'delete_volume_mapping')
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ fake_volume = get_fake_volume()
+ fake_volume['status'] = 'detaching'
+
+ host_mapper.unmap_volume_from_host(self.client, fake_volume,
+ eseries_fakes.HOST,
+ fake_volume_mapping)
+
+ self.assertTrue(self.client.delete_volume_mapping.called)
+
+ def test_unmap_volume_from_host_volume_mapped_to_multiattach_host_group_and_migrating( # noqa
+ self):
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[
+ 'clusterRef']
+ fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
+ self.mock_object(self.client, 'delete_volume_mapping')
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ fake_volume = get_fake_volume()
+ fake_volume['status'] = 'in-use'
+
+ host_mapper.unmap_volume_from_host(self.client, fake_volume,
+ eseries_fakes.HOST,
+ fake_volume_mapping)
+
+ self.assertFalse(self.client.delete_volume_mapping.called)
+
+ def test_unmap_volume_from_host_volume_mapped_to_outside_host_group(self):
+        """Ensure an error is raised when the volume is mapped to an
+        unknown host group that does not contain the host.
+        """
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_ref = "8500000060080E500023C7340036035F515B78FD"
+ fake_volume_mapping['mapRef'] = fake_ref
+ fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ fake_host = copy.deepcopy(eseries_fakes.HOST)
+ fake_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'list_hosts',
+ mock.Mock(return_value=[fake_host]))
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(return_value=
+ eseries_fakes.FOREIGN_HOST_GROUP))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.unmap_volume_from_host,
+ self.client, get_fake_volume(),
+ eseries_fakes.HOST,
+ fake_volume_mapping)
+ self.assertIn("unsupported host group", six.text_type(err))
+
+ def test_unmap_volume_from_host_volume_mapped_to_outside_host_group_w_host(
+ self):
+        """Ensure an error is raised when the volume is mapped to an
+        unknown host group that contains the host.
+        """
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_ref = "8500000060080E500023C7340036035F515B78FD"
+ fake_volume_mapping['mapRef'] = fake_ref
+ fake_eseries_volume['clusterRef'] = fake_ref
+ fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ fake_host = copy.deepcopy(eseries_fakes.HOST)
+ fake_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'list_hosts',
+ mock.Mock(return_value=[fake_host]))
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(return_value=
+ eseries_fakes.FOREIGN_HOST_GROUP))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.unmap_volume_from_host,
+ self.client, get_fake_volume(),
+ eseries_fakes.HOST,
+ fake_volume_mapping)
+
+ self.assertIn("unsupported host group", six.text_type(err))
+
+ def test_map_volume_to_single_host_volume_not_mapped(self):
+ self.mock_object(self.client, 'create_volume_mapping',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ host_mapper.map_volume_to_single_host(self.client, get_fake_volume(),
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ None)
+
+ self.assertTrue(self.client.create_volume_mapping.called)
+
+ def test_map_volume_to_single_host_volume_already_mapped_to_target_host(
+ self):
+ """Should be a no-op"""
+ self.mock_object(self.client, 'create_volume_mapping',
+ mock.Mock())
+
+ host_mapper.map_volume_to_single_host(self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ eseries_fakes.VOLUME_MAPPING)
+
+ self.assertFalse(self.client.create_volume_mapping.called)
+
+ def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group(
+ self):
+        """Should move the mapping to the target host if the volume is not
+        migrating or attached (in-use). A volume that is not in use does not
+        need its existing mapping, so it is safe to sever the mapping to the
+        host group.
+        """
+ fake_mapping_to_other_host = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_other_host['mapRef'] = \
+ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
+ self.mock_object(self.client, 'move_volume_mapping_via_symbol',
+ mock.Mock(return_value={'lun': 5}))
+
+ host_mapper.map_volume_to_single_host(self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ fake_mapping_to_other_host)
+
+ self.assertTrue(self.client.move_volume_mapping_via_symbol.called)
+
+ def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group_and_migrating( # noqa
+ self):
+ """Should raise error saying multiattach not enabled"""
+ fake_mapping_to_other_host = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_other_host['mapRef'] = \
+ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
+ fake_volume = get_fake_volume()
+ fake_volume['attach_status'] = "attached"
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_single_host,
+ self.client, fake_volume,
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ fake_mapping_to_other_host)
+
+ self.assertIn('multiattach is disabled', six.text_type(err))
+
+ def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group_and_attached( # noqa
+ self):
+ """Should raise error saying multiattach not enabled"""
+ fake_mapping_to_other_host = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_other_host['mapRef'] = \
+ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
+ fake_volume = get_fake_volume()
+ fake_volume['attach_status'] = "attached"
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_single_host,
+ self.client, fake_volume,
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ fake_mapping_to_other_host)
+
+ self.assertIn('multiattach is disabled', six.text_type(err))
+
+ def test_map_volume_to_single_host_volume_mapped_to_another_host(self):
+ """Should raise error saying multiattach not enabled"""
+ fake_mapping_to_other_host = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_other_host['mapRef'] = eseries_fakes.HOST_2[
+ 'hostRef']
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_single_host,
+ self.client, get_fake_volume(),
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ fake_mapping_to_other_host)
+
+ self.assertIn('multiattach is disabled', six.text_type(err))
+
+ def test_map_volume_to_multiple_hosts_volume_already_mapped_to_target_host(
+ self):
+ """Should be a no-op."""
+ self.mock_object(self.client, 'create_volume_mapping',
+ mock.Mock())
+
+ host_mapper.map_volume_to_multiple_hosts(self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ eseries_fakes.VOLUME_MAPPING)
+
+ self.assertFalse(self.client.create_volume_mapping.called)
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_multiattach_host_group( # noqa
+ self):
+ """Should ensure target host is in the multiattach host group."""
+ fake_host = copy.deepcopy(eseries_fakes.HOST_2)
+ fake_host['clusterRef'] = utils.NULL_REF
+
+ fake_mapping_to_host_group = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_host_group['mapRef'] = \
+ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
+
+ self.mock_object(self.client, 'set_host_group_for_host')
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(
+ return_value=eseries_fakes.MULTIATTACH_HOST_GROUP)
+ )
+
+ host_mapper.map_volume_to_multiple_hosts(self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ fake_host,
+ fake_mapping_to_host_group)
+
+ self.assertEqual(
+ 1, self.client.set_host_group_for_host.call_count)
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_multiattach_host_group_with_lun_collision( # noqa
+ self):
+ """Should ensure target host is in the multiattach host group."""
+ fake_host = copy.deepcopy(eseries_fakes.HOST_2)
+ fake_host['clusterRef'] = utils.NULL_REF
+ fake_mapping_to_host_group = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_host_group['mapRef'] = \
+ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
+ self.mock_object(self.client, 'set_host_group_for_host',
+ mock.Mock(side_effect=exception.NetAppDriverException)
+ )
+
+ self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_multiple_hosts,
+ self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ fake_host,
+ fake_mapping_to_host_group)
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host(self):
+ """Should ensure both existing host and destination host are in
+ multiattach host group and move the mapping to the host group.
+ """
+
+ existing_host = copy.deepcopy(eseries_fakes.HOST)
+ existing_host['clusterRef'] = utils.NULL_REF
+ target_host = copy.deepcopy(eseries_fakes.HOST_2)
+ target_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'get_host',
+ mock.Mock(return_value=existing_host))
+ self.mock_object(self.client, 'set_host_group_for_host')
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(side_effect=exception.NotFound))
+ mock_move_mapping = mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP)
+ self.mock_object(self.client,
+ 'move_volume_mapping_via_symbol',
+ mock_move_mapping)
+
+ host_mapper.map_volume_to_multiple_hosts(self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ target_host,
+ eseries_fakes.VOLUME_MAPPING)
+
+ self.assertEqual(
+ 2, self.client.set_host_group_for_host.call_count)
+
+ self.assertTrue(self.client.move_volume_mapping_via_symbol
+ .called)
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_source_host( # noqa
+ self):
+ """Should fail attempting to move source host to multiattach host
+ group and raise an error.
+ """
+
+ existing_host = copy.deepcopy(eseries_fakes.HOST)
+ existing_host['clusterRef'] = utils.NULL_REF
+ target_host = copy.deepcopy(eseries_fakes.HOST_2)
+ target_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'get_host',
+ mock.Mock(return_value=existing_host))
+ self.mock_object(self.client, 'set_host_group_for_host',
+ mock.Mock(side_effect=[
+ None,
+ exception.NetAppDriverException
+ ]))
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(side_effect=exception.NotFound))
+ mock_move_mapping = mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP)
+ self.mock_object(self.client,
+ 'move_volume_mapping_via_symbol',
+ mock_move_mapping)
+
+ self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_multiple_hosts,
+ self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ target_host,
+ eseries_fakes.VOLUME_MAPPING)
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_dest_host( # noqa
+ self):
+ """Should fail attempting to move destination host to multiattach host
+ group and raise an error.
+ """
+
+ existing_host = copy.deepcopy(eseries_fakes.HOST)
+ existing_host['clusterRef'] = utils.NULL_REF
+ target_host = copy.deepcopy(eseries_fakes.HOST_2)
+ target_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'get_host',
+ mock.Mock(return_value=existing_host))
+ self.mock_object(self.client, 'set_host_group_for_host',
+ mock.Mock(side_effect=[
+ exception.NetAppDriverException,
+ None
+ ]))
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(side_effect=exception.NotFound))
+ mock_move_mapping = mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP)
+ self.mock_object(self.client,
+ 'move_volume_mapping_via_symbol',
+ mock_move_mapping)
+
+ self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_multiple_hosts,
+ self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ target_host,
+ eseries_fakes.VOLUME_MAPPING)
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_foreign_host_group(
+ self):
+ """Should raise an error stating the volume is mapped to an
+ unsupported host group.
+ """
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_ref = "8500000060080E500023C7340036035F515B78FD"
+ fake_volume_mapping['mapRef'] = fake_ref
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ fake_host = copy.deepcopy(eseries_fakes.HOST)
+ fake_host['clusterRef'] = utils.NULL_REF
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(return_value=
+ eseries_fakes.FOREIGN_HOST_GROUP))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_multiple_hosts,
+ self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ fake_host,
+ fake_volume_mapping)
+ self.assertIn("unsupported host group", six.text_type(err))
+
+ def test_map_volume_to_multiple_hosts_volume_mapped_to_host_in_foreign_host_group( # noqa
+ self):
+ """Should raise an error stating the volume is mapped to a
+ host that is in an unsupported host group.
+ """
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_host = copy.deepcopy(eseries_fakes.HOST_2)
+ fake_host['clusterRef'] = eseries_fakes.FOREIGN_HOST_GROUP[
+ 'clusterRef']
+ fake_volume_mapping['mapRef'] = fake_host['hostRef']
+ fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ self.mock_object(self.client, 'get_host',
+ mock.Mock(return_value=fake_host))
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(side_effect=[
+ eseries_fakes.FOREIGN_HOST_GROUP]))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_multiple_hosts,
+ self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ eseries_fakes.HOST,
+ fake_volume_mapping)
+
+ self.assertIn("unsupported host group", six.text_type(err))
+
+ def test_map_volume_to_multiple_hosts_volume_target_host_in_foreign_host_group( # noqa
+ self):
+ """Should raise an error stating the target host is in an
+ unsupported host group.
+ """
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
+ fake_host = copy.deepcopy(eseries_fakes.HOST_2)
+ fake_host['clusterRef'] = eseries_fakes.FOREIGN_HOST_GROUP[
+ 'clusterRef']
+ self.mock_object(self.client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ self.mock_object(self.client, 'get_host',
+ mock.Mock(return_value=eseries_fakes.HOST))
+ self.mock_object(self.client, 'get_host_group',
+ mock.Mock(side_effect=[
+ eseries_fakes.FOREIGN_HOST_GROUP]))
+
+ err = self.assertRaises(exception.NetAppDriverException,
+ host_mapper.map_volume_to_multiple_hosts,
+ self.client,
+ get_fake_volume(),
+ eseries_fakes.VOLUME,
+ fake_host,
+ fake_volume_mapping)
+
+ self.assertIn("unsupported host group", six.text_type(err))
Mock unit tests for the NetApp E-series iscsi driver
"""
+import copy
+
import mock
+import six
+from cinder import exception
from cinder import test
-from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
+from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
+ eseries_fakes
from cinder.volume.drivers.netapp.eseries import client as es_client
+from cinder.volume.drivers.netapp.eseries import host_mapper
from cinder.volume.drivers.netapp.eseries import iscsi as es_iscsi
+from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import utils as na_utils
+def get_fake_volume():
+ return {
+ 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
+ 'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
+ 'provider_auth': 'provider a b', 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None, 'migration_status': None, 'attach_status':
+ "detached"
+ }
+
+
class NetAppEseriesISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppEseriesISCSIDriverTestCase, self).setUp()
- kwargs = {'configuration': self.get_config_eseries()}
+ kwargs = {'configuration':
+ eseries_fakes.create_configuration_eseries()}
self.driver = es_iscsi.NetAppEseriesISCSIDriver(**kwargs)
- self.driver._client = mock.Mock()
-
- def get_config_eseries(self):
- config = na_fakes.create_configuration_eseries()
- config.netapp_storage_protocol = 'iscsi'
- config.netapp_login = 'rw'
- config.netapp_password = 'rw'
- config.netapp_server_hostname = '127.0.0.1'
- config.netapp_transport_type = 'http'
- config.netapp_server_port = '8080'
- config.netapp_storage_pools = 'DDP'
- config.netapp_storage_family = 'eseries'
- config.netapp_sa_password = 'saPass'
- config.netapp_controller_ips = '10.11.12.13,10.11.12.14'
- config.netapp_webservice_path = '/devmgr/v2'
- return config
-
- @mock.patch.object(es_iscsi.NetAppEseriesISCSIDriver,
- '_check_mode_get_or_register_storage_system')
- @mock.patch.object(es_client, 'RestClient', mock.Mock())
- @mock.patch.object(na_utils, 'check_flags', mock.Mock())
- def test_do_setup(self, mock_check_flags):
+ self.driver._client = eseries_fakes.FakeEseriesClient()
+ self.driver.check_for_setup_error()
+
+ def test_do_setup(self):
+ self.mock_object(es_iscsi.NetAppEseriesISCSIDriver,
+ '_check_mode_get_or_register_storage_system')
+ self.mock_object(es_client, 'RestClient',
+ eseries_fakes.FakeEseriesClient)
+ mock_check_flags = self.mock_object(na_utils, 'check_flags')
self.driver.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'},
'test_vg2': {'netapp_disk_encryption': 'true'}},
ssc_stats)
+
+ def test_terminate_connection_no_hosts(self):
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+
+ self.mock_object(self.driver._client, 'list_hosts',
+ mock.Mock(return_value=[]))
+
+ self.assertRaises(exception.NotFound,
+ self.driver.terminate_connection,
+ get_fake_volume(),
+ connector)
+
+ def test_terminate_connection_volume_not_mapped(self):
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ err = self.assertRaises(exception.NetAppDriverException,
+ self.driver.terminate_connection,
+ get_fake_volume(),
+ connector)
+ self.assertIn("not currently mapped to host", six.text_type(err))
+
+ def test_terminate_connection_volume_mapped(self):
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
+ fake_eseries_volume['listOfMappings'] = [
+ eseries_fakes.VOLUME_MAPPING
+ ]
+ self.mock_object(self.driver._client, 'list_volumes',
+ mock.Mock(return_value=[fake_eseries_volume]))
+ self.mock_object(host_mapper, 'unmap_volume_from_host')
+
+ self.driver.terminate_connection(get_fake_volume(), connector)
+
+ self.assertTrue(host_mapper.unmap_volume_from_host.called)
+
+ def test_terminate_connection_volume_not_mapped_initiator_does_not_exist(
+ self):
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ self.mock_object(self.driver._client, 'list_hosts',
+ mock.Mock(return_value=[eseries_fakes.HOST_2]))
+ self.assertRaises(exception.NotFound,
+ self.driver.terminate_connection,
+ get_fake_volume(),
+ connector)
+
+ def test_initialize_connection_volume_not_mapped(self):
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ self.mock_object(self.driver._client, 'get_volume_mappings',
+ mock.Mock(return_value=[]))
+ self.mock_object(host_mapper, 'map_volume_to_single_host',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ self.driver.initialize_connection(get_fake_volume(), connector)
+
+ self.assertTrue(self.driver._client.get_volume_mappings.called)
+ self.assertTrue(host_mapper.map_volume_to_single_host.called)
+
+ def test_initialize_connection_volume_not_mapped_host_does_not_exist(self):
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ self.mock_object(self.driver._client, 'get_volume_mappings',
+ mock.Mock(return_value=[]))
+ self.mock_object(self.driver._client, 'list_hosts',
+ mock.Mock(return_value=[]))
+ self.mock_object(self.driver._client, 'create_host_with_port',
+ mock.Mock(return_value=eseries_fakes.HOST))
+ self.mock_object(host_mapper, 'map_volume_to_single_host',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ self.driver.initialize_connection(get_fake_volume(), connector)
+
+ self.assertTrue(self.driver._client.get_volume_mappings.called)
+ self.assertTrue(self.driver._client.list_hosts.called)
+ self.assertTrue(self.driver._client.create_host_with_port.called)
+ self.assertTrue(host_mapper.map_volume_to_single_host.called)
+
+ def test_initialize_connection_volume_already_mapped_to_target_host(self):
+ """Should be a no-op"""
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ self.mock_object(host_mapper, 'map_volume_to_single_host',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ self.driver.initialize_connection(get_fake_volume(), connector)
+
+ self.assertTrue(host_mapper.map_volume_to_single_host.called)
+
+ def test_initialize_connection_volume_mapped_to_another_host(self):
+ """Should raise error saying multiattach not enabled"""
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ fake_mapping_to_other_host = copy.deepcopy(
+ eseries_fakes.VOLUME_MAPPING)
+ fake_mapping_to_other_host['mapRef'] = eseries_fakes.HOST_2[
+ 'hostRef']
+ self.mock_object(host_mapper, 'map_volume_to_single_host',
+ mock.Mock(
+ side_effect=exception.NetAppDriverException))
+
+ self.assertRaises(exception.NetAppDriverException,
+ self.driver.initialize_connection,
+ get_fake_volume(), connector)
+
+ self.assertTrue(host_mapper.map_volume_to_single_host.called)
+
+
+class NetAppEseriesISCSIDriverMultiAttachTestCase(test.TestCase):
+ """Test driver behavior when the netapp_enable_multiattach
+ configuration option is True.
+ """
+
+ def setUp(self):
+ super(NetAppEseriesISCSIDriverMultiAttachTestCase, self).setUp()
+ config = eseries_fakes.create_configuration_eseries()
+ config.netapp_enable_multiattach = True
+
+ kwargs = {'configuration': config}
+
+ self.driver = es_iscsi.NetAppEseriesISCSIDriver(**kwargs)
+ self.driver._client = eseries_fakes.FakeEseriesClient()
+ self.driver.check_for_setup_error()
+
+ def test_do_setup_host_group_already_exists(self):
+ mock_check_flags = self.mock_object(na_utils, 'check_flags')
+ self.mock_object(es_iscsi.NetAppEseriesISCSIDriver,
+ '_check_mode_get_or_register_storage_system')
+ fake_rest_client = eseries_fakes.FakeEseriesClient()
+ self.mock_object(self.driver, '_create_rest_client',
+ mock.Mock(return_value=fake_rest_client))
+ mock_create = self.mock_object(fake_rest_client, 'create_host_group')
+
+ self.driver.do_setup(mock.Mock())
+
+ self.assertTrue(mock_check_flags.called)
+ self.assertFalse(mock_create.call_count)
+
+ def test_do_setup_host_group_does_not_exist(self):
+ mock_check_flags = self.mock_object(na_utils, 'check_flags')
+ fake_rest_client = eseries_fakes.FakeEseriesClient()
+ self.mock_object(self.driver, '_create_rest_client',
+ mock.Mock(return_value=fake_rest_client))
+ mock_get_host_group = self.mock_object(
+ fake_rest_client, "get_host_group_by_name",
+ mock.Mock(side_effect=exception.NotFound))
+ self.mock_object(es_iscsi.NetAppEseriesISCSIDriver,
+ '_check_mode_get_or_register_storage_system')
+
+ self.driver.do_setup(mock.Mock())
+
+ self.assertTrue(mock_check_flags.called)
+ self.assertTrue(mock_get_host_group.call_count)
+
+ def test_create_volume(self):
+ self.driver._client.create_volume = mock.Mock(
+ return_value=eseries_fakes.VOLUME)
+
+ self.driver.create_volume(get_fake_volume())
+ self.assertTrue(self.driver._client.create_volume.call_count)
+
+ def test_create_volume_too_many_volumes(self):
+ self.driver._client.list_volumes = mock.Mock(
+ return_value=[eseries_fakes.VOLUME for __ in
+ range(utils.MAX_LUNS_PER_HOST_GROUP + 1)])
+ self.driver._client.create_volume = mock.Mock(
+ return_value=eseries_fakes.VOLUME)
+
+ self.assertRaises(exception.NetAppDriverException,
+ self.driver.create_volume,
+ get_fake_volume())
+ self.assertFalse(self.driver._client.create_volume.call_count)
+
+ def test_initialize_connection_volume_not_mapped(self):
+ """Map the volume directly to destination host.
+ """
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME_2}
+ self.mock_object(self.driver._client, 'get_volume_mappings',
+ mock.Mock(return_value=[]))
+ self.mock_object(host_mapper, 'map_volume_to_single_host',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ self.driver.initialize_connection(get_fake_volume(), connector)
+
+ self.assertTrue(self.driver._client.get_volume_mappings.called)
+ self.assertTrue(host_mapper.map_volume_to_single_host.called)
+
+ def test_initialize_connection_volume_not_mapped_host_does_not_exist(self):
+ """Should create the host map directly to the host."""
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME_2}
+ self.mock_object(self.driver._client, 'list_hosts',
+ mock.Mock(return_value=[]))
+ self.mock_object(self.driver._client, 'create_host_with_port',
+ mock.Mock(
+ return_value=eseries_fakes.HOST_2))
+ self.mock_object(self.driver._client, 'get_volume_mappings',
+ mock.Mock(return_value=[]))
+ self.mock_object(host_mapper, 'map_volume_to_single_host',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ self.driver.initialize_connection(get_fake_volume(), connector)
+
+ self.assertTrue(self.driver._client.create_host_with_port.called)
+ self.assertTrue(self.driver._client.get_volume_mappings.called)
+ self.assertTrue(host_mapper.map_volume_to_single_host.called)
+
+ def test_initialize_connection_volume_already_mapped(self):
+ """Should be a no-op."""
+ connector = {'initiator': eseries_fakes.INITIATOR_NAME}
+ self.mock_object(host_mapper, 'map_volume_to_multiple_hosts',
+ mock.Mock(
+ return_value=eseries_fakes.VOLUME_MAPPING))
+
+ self.driver.initialize_connection(get_fake_volume(), connector)
+
+ self.assertTrue(host_mapper.map_volume_to_multiple_hosts.called)
config = create_configuration()
config.append_config_values(na_opts.netapp_cluster_opts)
return config
-
-
-def create_configuration_eseries():
- config = create_configuration()
- config.append_config_values(na_opts.netapp_eseries_opts)
- return config
from cinder import exception
from cinder.i18n import _, _LE
+from cinder.volume.drivers.netapp.eseries import utils
LOG = logging.getLogger(__name__)
path = "/storage-systems/{system-id}/volume-mappings/{object-id}"
return self._invoke('DELETE', path, **{'object-id': map_object_id})
+ def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id):
+ """Moves a map from one host/host_group object to another."""
+
+ path = "/storage-systems/{system-id}/symbol/moveLUNMapping"
+ data = {'lunMappingRef': map_ref,
+ 'lun': int(lun_id),
+ 'mapRef': to_ref}
+ return_code = self._invoke('POST', path, data)
+ if return_code == 'ok':
+ return {'lun': lun_id}
+ msg = _("Failed to move LUN mapping. Return code: %s") % return_code
+ raise exception.NetAppDriverException(msg)
+
def list_hardware_inventory(self):
"""Lists objects in the hardware inventory."""
path = "/storage-systems/{system-id}/hardware-inventory"
return self._invoke('GET', path)
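+
+    # Host groups ("clusters" in the underlying API, hence the clusterRef
+    # fields) let a single LUN mapping be shared by every host in the group;
+    # the multiattach host group managed by the driver relies on this.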
+ def create_host_group(self, label):
+ """Creates a host group on the array."""
+ path = "/storage-systems/{system-id}/host-groups"
+ data = {'name': label}
+ return self._invoke('POST', path, data)
+
+ def get_host_group(self, host_group_ref):
+ """Gets a single host group from the array."""
+ path = "/storage-systems/{system-id}/host-groups/{object-id}"
+ try:
+ return self._invoke('GET', path, **{'object-id': host_group_ref})
+ except exception.NetAppDriverException:
+ raise exception.NotFound(_("Host group with ref %s not found") %
+ host_group_ref)
+
+ def get_host_group_by_name(self, name):
+ """Gets a single host group by name from the array."""
+ host_groups = self.list_host_groups()
+ matching = [host_group for host_group in host_groups
+ if host_group['label'] == name]
+ if len(matching):
+ return matching[0]
+ raise exception.NotFound(_("Host group with name %s not found") % name)
+
+ def list_host_groups(self):
+ """Lists host groups on the array."""
+ path = "/storage-systems/{system-id}/host-groups"
+ return self._invoke('GET', path)
+
def list_hosts(self):
"""Lists host objects in the system."""
path = "/storage-systems/{system-id}/hosts"
port = {'type': port_type, 'port': port_id, 'label': port_label}
return self.create_host(label, host_type, [port], group_id)
- def update_host_type(self, host_ref, host_type):
+ def update_host(self, host_ref, data):
"""Updates host type for a given host."""
path = "/storage-systems/{system-id}/hosts/{object-id}"
- data = {'hostType': host_type}
return self._invoke('POST', path, data, **{'object-id': host_ref})
+ def get_host(self, host_ref):
+ """Gets a single host from the array."""
+ path = "/storage-systems/{system-id}/hosts/{object-id}"
+ return self._invoke('GET', path, **{'object-id': host_ref})
+
+ def update_host_type(self, host_ref, host_type):
+ """Updates host type for a given host."""
+ data = {'hostType': host_type}
+ return self.update_host(host_ref, data)
+
+ def set_host_group_for_host(self, host_ref, host_group_ref=utils.NULL_REF):
+ """Sets or clears which host group a host is in."""
+ data = {'groupId': host_group_ref}
+ self.update_host(host_ref, data)
+
def list_host_types(self):
"""Lists host types in storage system."""
path = "/storage-systems/{system-id}/host-types"
--- /dev/null
+# Copyright (c) 2015 Alex Meade. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder import exception
+from cinder.i18n import _
+
+
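+# These follow the usual Cinder exception pattern: the message template is
+# filled from keyword arguments, for example
+# UnsupportedHostGroup(volume_id=volume['id'], group=host_group['label']);
+# host_mapper also passes a pre-formatted message string in a few places.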
+class VolumeNotMapped(exception.NetAppDriverException):
+ message = _("Volume %(volume_id)s is not currently mapped to host "
+ "%(host)s")
+
+
+class UnsupportedHostGroup(exception.NetAppDriverException):
+ message = _("Volume %(volume_id)s is currently mapped to unsupported "
+ "host group %(group)s")
--- /dev/null
+# Copyright (c) 2015 Alex Meade. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" This module handles mapping E-Series volumes to E-Series Hosts and Host
+Groups.
+"""
+
+from oslo_log import log as logging
+from six.moves import xrange
+
+from cinder import exception
+from cinder.i18n import _
+from cinder import utils as cinder_utils
+from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
+from cinder.volume.drivers.netapp.eseries import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+@cinder_utils.synchronized('map_es_volume')
+def map_volume_to_single_host(client, volume, eseries_vol, host,
+ vol_map):
+ """Maps the e-series volume to host with initiator."""
+ msg = "Attempting to map volume %s to single host."
+ LOG.debug(msg % volume['id'])
+
+ # If volume is not mapped on the backend, map directly to host
+ if not vol_map:
+ mappings = _get_vol_mapping_for_host_frm_array(client, host['hostRef'])
+ lun = _get_free_lun(client, host, mappings)
+ return client.create_volume_mapping(eseries_vol['volumeRef'],
+ host['hostRef'], lun)
+
+ # If volume is already mapped to desired host
+ if vol_map.get('mapRef') == host['hostRef']:
+ return vol_map
+
+ multiattach_cluster_ref = None
+ try:
+ host_group = client.get_host_group_by_name(
+ utils.MULTI_ATTACH_HOST_GROUP_NAME)
+ multiattach_cluster_ref = host_group['clusterRef']
+ except exception.NotFound:
+ pass
+
+ # Volume is mapped to the multiattach host group
+ if vol_map.get('mapRef') == multiattach_cluster_ref:
+ LOG.debug("Volume %s is mapped to multiattach host group."
+ % volume['id'])
+
+        # If the volume is not currently attached according to Cinder, it is
+        # safe to move its existing mapping over to the requested host.
+        if volume['attach_status'] != 'attached':
+ msg = (_("Volume %(vol)s is not currently attached, "
+ "moving existing mapping to host %(host)s.")
+ % {'vol': volume['id'], 'host': host['label']})
+ LOG.debug(msg)
+ mappings = _get_vol_mapping_for_host_frm_array(
+ client, host['hostRef'])
+ lun = _get_free_lun(client, host, mappings)
+ return client.move_volume_mapping_via_symbol(
+ vol_map.get('mapRef'), host['hostRef'], lun
+ )
+
+ # If we got this far, volume must be mapped to something else
+ msg = _("Cannot attach already attached volume %s; "
+ "multiattach is disabled via the "
+ "'netapp_enable_multiattach' configuration option.")
+ raise exception.NetAppDriverException(msg % volume['id'])
+
+
+@cinder_utils.synchronized('map_es_volume')
+def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host,
+ mapping):
+ """Maps the e-series volume to multiattach host group."""
+
+ msg = "Attempting to map volume %s to multiple hosts."
+ LOG.debug(msg % volume['id'])
+
+ # If volume is already mapped to desired host, return the mapping
+ if mapping['mapRef'] == target_host['hostRef']:
+ LOG.debug("Volume %(vol)s already mapped to host %(host)s",
+ {'vol': volume['id'], 'host': target_host['label']})
+ return mapping
+
+ # If target host in a host group, ensure it is the multiattach host group
+ if target_host['clusterRef'] != utils.NULL_REF:
+        host_group = client.get_host_group(target_host['clusterRef'])
+ if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
+ msg = _("Specified host to map to volume %(vol)s is in "
+ "unsupported host group with %(group)s.")
+ params = {'vol': volume['id'], 'group': host_group['label']}
+ raise eseries_exc.UnsupportedHostGroup(msg % params)
+
+ mapped_host_group = None
+ multiattach_host_group = None
+ try:
+ mapped_host_group = client.get_host_group(mapping['mapRef'])
+ # If volume is mapped to a foreign host group raise an error
+ if mapped_host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
+ raise eseries_exc.UnsupportedHostGroup(
+ volume_id=volume['id'], group=mapped_host_group['label'])
+ multiattach_host_group = mapped_host_group
+ except exception.NotFound:
+ pass
+
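+    # The existing mapping is not to a host group, so look up the
+    # multiattach host group by its well-known name instead.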
+ if not multiattach_host_group:
+ multiattach_host_group = client.get_host_group_by_name(
+ utils.MULTI_ATTACH_HOST_GROUP_NAME)
+
+ # If volume is mapped directly to a host, move the host into the
+ # multiattach host group. Error if the host is in a foreign host group
+ if not mapped_host_group:
+ current_host = client.get_host(mapping['mapRef'])
+ if current_host['clusterRef'] != utils.NULL_REF:
+            host_group = client.get_host_group(current_host['clusterRef'])
+ if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
+ msg = _("Currently mapped host for volume %(vol)s is in "
+ "unsupported host group with %(group)s.")
+ params = {'vol': volume['id'], 'group': host_group['label']}
+ raise eseries_exc.UnsupportedHostGroup(msg % params)
+ client.set_host_group_for_host(current_host['hostRef'],
+ multiattach_host_group['clusterRef'])
+
+ # Move destination host into multiattach host group
+    client.set_host_group_for_host(
+        target_host['hostRef'], multiattach_host_group['clusterRef'])
+
+ # Once both existing and target hosts are in the multiattach host group,
+ # move the volume mapping to said group.
+ if not mapped_host_group:
+ msg = "Moving mapping for volume %s to multiattach host group."
+ LOG.debug(msg % volume['id'])
+ return client.move_volume_mapping_via_symbol(
+ mapping.get('lunMappingRef'),
+ multiattach_host_group['clusterRef'],
+ mapping['lun']
+ )
+
+ return mapping
+
+
+def _get_free_lun(client, host, maps=None):
+ """Gets free LUN for given host."""
+ ref = host['hostRef']
+ luns = maps or _get_vol_mapping_for_host_frm_array(client, ref)
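+    # LUN numbers already used at the host group level are also unavailable
+    # to this host, so include those mappings when computing used LUNs.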
+ if host['clusterRef'] != utils.NULL_REF:
+ host_group_maps = _get_vol_mapping_for_host_group_frm_array(
+ client, host['clusterRef'])
+ luns.extend(host_group_maps)
+ used_luns = set(map(lambda lun: int(lun['lun']), luns))
+ for lun in xrange(utils.MAX_LUNS_PER_HOST):
+ if lun not in used_luns:
+ return lun
+ msg = _("No free LUNs. Host might have exceeded max number of LUNs.")
+ raise exception.NetAppDriverException(msg)
+
+
+def _get_vol_mapping_for_host_frm_array(client, host_ref):
+ """Gets all volume mappings for given host from array."""
+ mappings = client.get_volume_mappings() or []
+    host_maps = [x for x in mappings if x.get('mapRef') == host_ref]
+ return host_maps
+
+
+def _get_vol_mapping_for_host_group_frm_array(client, hg_ref):
+ """Gets all volume mappings for given host from array."""
+ mappings = client.get_volume_mappings() or []
+    hg_maps = [x for x in mappings if x.get('mapRef') == hg_ref]
+ return hg_maps
+
+
+def unmap_volume_from_host(client, volume, host, mapping):
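+    """Unmaps the e-series volume from a host or multiattach host group."""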
+ # Volume is mapped directly to host, so delete the mapping
+ if mapping.get('mapRef') == host['hostRef']:
+ msg = ("Volume %(vol)s is mapped directly to host %(host)s; removing "
+ "mapping.")
+ LOG.debug(msg % {'vol': volume['id'], 'host': host['label']})
+ client.delete_volume_mapping(mapping['lunMappingRef'])
+ return
+
+ try:
+ host_group = client.get_host_group(mapping['mapRef'])
+ except exception.NotFound:
+        # Volume is mapped, but to a different host
+ raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
+ host=host['label'])
+ # If volume is mapped to a foreign host group raise error
+ if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
+ raise eseries_exc.UnsupportedHostGroup(volume_id=volume['id'],
+ group=host_group['label'])
+ # If target host is not in the multiattach host group
+ if host['clusterRef'] != host_group['clusterRef']:
+ raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
+ host=host['label'])
+
+ # Volume is mapped to multiattach host group
+ # Remove mapping if volume should no longer be attached after this
+ # operation.
+ if volume['status'] == 'detaching':
+ msg = ("Volume %s is mapped directly to multiattach host group "
+ "but is not currently attached; removing mapping.")
+ LOG.debug(msg % volume['id'])
+ client.delete_volume_mapping(mapping['lunMappingRef'])
+
+
+def get_host_mapping_for_vol_frm_array(client, volume):
+ """Gets all host mappings for given volume from array."""
+ mappings = client.get_volume_mappings() or []
+    host_maps = [x for x in mappings
+                 if x.get('volumeRef') == volume['volumeRef']]
+ return host_maps
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
+from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
+from cinder.volume.drivers.netapp.eseries import host_mapper
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
- MAX_LUNS_PER_HOST = 255
HOST_TYPES = {'aix': 'AIX MPIO',
'avt': 'AVT_4M',
'factoryDefault': 'FactoryDefault',
self.context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
- port = self.configuration.netapp_server_port
- scheme = self.configuration.netapp_transport_type.lower()
+ self._client = self._create_rest_client(self.configuration)
+ self._check_mode_get_or_register_storage_system()
+ if self.configuration.netapp_enable_multiattach:
+ self._ensure_multi_attach_host_group_exists()
+
+ def _create_rest_client(self, configuration):
+ port = configuration.netapp_server_port
+ scheme = configuration.netapp_transport_type.lower()
if port is None:
if scheme == 'http':
port = 8080
elif scheme == 'https':
port = 8443
- self._client = client.RestClient(
+ return client.RestClient(
scheme=scheme,
- host=self.configuration.netapp_server_hostname,
+ host=configuration.netapp_server_hostname,
port=port,
- service_path=self.configuration.netapp_webservice_path,
- username=self.configuration.netapp_login,
- password=self.configuration.netapp_password)
- self._check_mode_get_or_register_storage_system()
+ service_path=configuration.netapp_webservice_path,
+ username=configuration.netapp_login,
+ password=configuration.netapp_password)
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
'mpflag': 'use_multipath_for_image_xfer'}
LOG.warning(msg)
+ def _ensure_multi_attach_host_group_exists(self):
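+        """Ensures the multi-attach host group exists on the array."""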
+ try:
+ host_group = self._client.get_host_group_by_name(
+ utils.MULTI_ATTACH_HOST_GROUP_NAME)
+ msg = _LI("The multi-attach E-Series host group '%(label)s' "
+ "already exists with clusterRef %(clusterRef)s")
+ LOG.info(msg % host_group)
+ except exception.NotFound:
+ host_group = self._client.create_host_group(
+ utils.MULTI_ATTACH_HOST_GROUP_NAME)
+ msg = _LI("Created multi-attach E-Series host group '%(label)s' "
+ "with clusterRef %(clusterRef)s")
+ LOG.info(msg % host_group)
+
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
sn_gp[group_id]['images'] = sn_gp[group_id].get('images') or []
sn_gp[group_id]['images'].append(image)
- def _cache_vol_mapping(self, mapping):
- """Caches volume mapping in volume object."""
- vol_id = mapping['volumeRef']
- volume = self._objects['volumes']['ref_vol'][vol_id]
- volume['listOfMappings'] = volume.get('listOfMappings') or []
- for mapp in volume['listOfMappings']:
- if mapp['lunMappingRef'] == mapping['lunMappingRef']:
- return
- volume['listOfMappings'].append(mapping)
-
def _del_volume_frm_cache(self, label):
"""Deletes volume from cache."""
vol_id = self._objects['volumes']['label_ref'].get(label)
else:
LOG.debug("Snapshot %s not cached.", obj_name)
- def _del_vol_mapping_frm_cache(self, mapping):
- """Deletes volume mapping under cached volume."""
- vol_id = mapping['volumeRef']
- volume = self._objects['volumes']['ref_vol'].get(vol_id) or {}
- mappings = volume.get('listOfMappings') or []
- try:
- mappings.remove(mapping)
- except ValueError:
- LOG.debug("Mapping with id %s already removed.",
- mapping['lunMappingRef'])
-
def _get_volume(self, uid):
label = utils.convert_uuid_to_es_fmt(uid)
return self._get_volume_with_label_wwn(label)
if not (label or wwn):
raise exception.InvalidInput(_('Either volume label or wwn'
' is required as input.'))
- try:
- return self._get_cached_volume(label)
- except KeyError:
- wwn = wwn.replace(':', '').upper() if wwn else None
- for vol in self._client.list_volumes():
- if label and vol.get('label') != label:
- continue
- if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
- continue
- self._cache_volume(vol)
- label = vol.get('label')
- break
- return self._get_cached_volume(label)
+ wwn = wwn.replace(':', '').upper() if wwn else None
+ eseries_volume = None
+ for vol in self._client.list_volumes():
+ if label and vol.get('label') != label:
+ continue
+ if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
+ continue
+ eseries_volume = vol
+ self._cache_volume(vol)
+ break
+
+ if not eseries_volume:
+ raise KeyError()
+ return eseries_volume
def _get_cached_volume(self, label):
vol_id = self._objects['volumes']['label_ref'][label]
size_gb):
"""Creates volume with given label and size."""
+ if self.configuration.netapp_enable_multiattach:
+ volumes = self._client.list_volumes()
+ # NOTE(ameade): Ensure we do not create more volumes than we could
+ # map to the multi attach ESeries host group.
+ if len(volumes) > utils.MAX_LUNS_PER_HOST_GROUP:
+ msg = (_("Cannot create more than %(req)s volumes on the "
+ "ESeries array when 'netapp_enable_multiattach' is "
+ "set to true.") %
+ {'req': utils.MAX_LUNS_PER_HOST_GROUP})
+ raise exception.NetAppDriverException(msg)
+
target_pool = None
pools = self._client.list_storage_pools()
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
initiator_name = connector['initiator']
- vol = self._get_volume(volume['name_id'])
- iscsi_details = self._get_iscsi_service_details()
- iscsi_portal = self._get_iscsi_portal_for_vol(vol, iscsi_details)
- mapping = self._map_volume_to_host(vol, initiator_name)
+ eseries_vol = self._get_volume(volume['name_id'])
+ existing_maps = host_mapper.get_host_mapping_for_vol_frm_array(
+ self._client, eseries_vol)
+ host = self._get_or_create_host(initiator_name, self.host_type)
+ # There can only be one or zero mappings on a volume in E-Series
+ current_map = existing_maps[0] if existing_maps else None
+
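+        # A volume that is already mapped can only be shared with another
+        # host by moving it into the multiattach host group.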
+ if self.configuration.netapp_enable_multiattach and current_map:
+ self._ensure_multi_attach_host_group_exists()
+ mapping = host_mapper.map_volume_to_multiple_hosts(self._client,
+ volume,
+ eseries_vol,
+ host,
+ current_map)
+ else:
+ mapping = host_mapper.map_volume_to_single_host(self._client,
+ volume,
+ eseries_vol,
+ host,
+ current_map)
+
lun_id = mapping['lun']
- self._cache_vol_mapping(mapping)
msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
+
+ iscsi_details = self._get_iscsi_service_details()
+ iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
+ iscsi_details)
msg = _("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.")
LOG.debug(msg % msg_fmt)
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
- @cinder_utils.synchronized('map_es_volume')
- def _map_volume_to_host(self, vol, initiator):
- """Maps the e-series volume to host with initiator."""
- host = self._get_or_create_host(initiator, self.host_type)
- vol_maps = self._get_host_mapping_for_vol_frm_array(vol)
- for vol_map in vol_maps:
- if vol_map.get('mapRef') == host['hostRef']:
- return vol_map
- else:
- self._client.delete_volume_mapping(vol_map['lunMappingRef'])
- self._del_vol_mapping_frm_cache(vol_map)
- mappings = self._get_vol_mapping_for_host_frm_array(host['hostRef'])
- lun = self._get_free_lun(host, mappings)
- return self._client.create_volume_mapping(vol['volumeRef'],
- host['hostRef'], lun)
-
def _get_or_create_host(self, port_id, host_type):
"""Fetch or create a host by given port."""
try:
host = self._get_host_with_port(port_id)
ht_def = self._get_host_type_definition(host_type)
- if host.get('hostTypeIndex') == ht_def.get('index'):
- return host
- else:
+ if host.get('hostTypeIndex') != ht_def.get('index'):
try:
- return self._client.update_host_type(
+ host = self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
msg = _LW("Unable to update host type for host with "
"label %(l)s. %(e)s")
LOG.warning(msg % {'l': host['label'], 'e': e.msg})
- return host
+ return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
return self._create_host(port_id, host_type)
msg = _("Host with port %(port)s not found.")
raise exception.NotFound(msg % {'port': port_id})
- def _create_host(self, port_id, host_type):
+ def _create_host(self, port_id, host_type, host_group=None):
"""Creates host on system with given initiator as port_id."""
LOG.info(_LI("Creating host with port %s."), port_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
return self._client.create_host_with_port(label, host_type,
- port_id, port_label)
+ port_id, port_label,
+ group_id=host_group)
def _get_host_type_definition(self, host_type):
"""Gets supported host type if available on storage system."""
return ht
raise exception.NotFound(_("Host type %s not supported.") % host_type)
- def _get_free_lun(self, host, maps=None):
- """Gets free LUN for given host."""
- ref = host['hostRef']
- luns = maps or self._get_vol_mapping_for_host_frm_array(ref)
- used_luns = set(map(lambda lun: int(lun['lun']), luns))
- for lun in xrange(self.MAX_LUNS_PER_HOST):
- if lun not in used_luns:
- return lun
- msg = _("No free LUNs. Host might exceeded max LUNs.")
- raise exception.NetAppDriverException(msg)
-
- def _get_vol_mapping_for_host_frm_array(self, host_ref):
- """Gets all volume mappings for given host from array."""
- mappings = self._client.get_volume_mappings() or []
- host_maps = filter(lambda x: x.get('mapRef') == host_ref, mappings)
- return host_maps
-
- def _get_host_mapping_for_vol_frm_array(self, volume):
- """Gets all host mappings for given volume from array."""
- mappings = self._client.get_volume_mappings() or []
- host_maps = filter(lambda x: x.get('volumeRef') == volume['volumeRef'],
- mappings)
- return host_maps
-
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
- vol = self._get_volume(volume['name_id'])
- host = self._get_host_with_port(connector['initiator'])
- mapping = self._get_cached_vol_mapping_for_host(vol, host)
- self._client.delete_volume_mapping(mapping['lunMappingRef'])
- self._del_vol_mapping_frm_cache(mapping)
-
- def _get_cached_vol_mapping_for_host(self, volume, host):
- """Gets cached volume mapping for given host."""
- mappings = volume.get('listOfMappings') or []
- for mapping in mappings:
- if mapping.get('mapRef') == host['hostRef']:
- return mapping
- msg = _("Mapping not found for %(vol)s to host %(ht)s.")
- raise exception.NotFound(msg % {'vol': volume['volumeRef'],
- 'ht': host['hostRef']})
+ eseries_vol = self._get_volume(volume['name_id'])
+ initiator = connector['initiator']
+ host = self._get_host_with_port(initiator)
+ mappings = eseries_vol.get('listOfMappings', [])
+
+ # There can only be one or zero mappings on a volume in E-Series
+ mapping = mappings[0] if mappings else None
+
+ if not mapping:
+ raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
+ host=host['label'])
+ host_mapper.unmap_volume_from_host(self._client, volume, host, mapping)
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
LOG = logging.getLogger(__name__)
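+# Name of the host group reserved for multi-attached Cinder volumes.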
+MULTI_ATTACH_HOST_GROUP_NAME = 'cinder-multi-attach'
+NULL_REF = '0000000000000000000000000000000000000000'
+MAX_LUNS_PER_HOST = 255
+MAX_LUNS_PER_HOST_GROUP = 256
+
def encode_hex_to_base32(hex_string):
"""Encodes hex to base32 bit as per RFC4648."""
'specified storage pools. Only dynamic disk pools are '
'currently supported. Specify the value of this option to'
' be a comma separated list of disk pool names to be used'
- ' for provisioning.')), ]
+ ' for provisioning.')),
+ cfg.BoolOpt('netapp_enable_multiattach',
+ default=True,
+ help='This option specifies whether the driver should allow '
+ 'operations that require multiple attachments to a '
+ 'volume. An example would be live migration of servers '
+ 'that have volumes attached. When enabled, this backend '
+ 'is limited to 256 total volumes in order to '
+ 'guarantee volumes can be accessed by more than one '
+ 'host.'),
+]
netapp_nfs_extra_opts = [
cfg.StrOpt('netapp_copyoffload_tool_path',
default=None,