class BrocadeZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
+
+
+class NetAppDriverException(VolumeDriverException):
+ message = _("NetApp Cinder Driver exception.")
--- /dev/null
+# Copyright (c) 2014 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for NetApp e-series iscsi volume driver.
+"""
+
+import json
+import mock
+import re
+import requests
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.netapp import common
+from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
+from cinder.volume.drivers.netapp.options import netapp_eseries_opts
+
+
+LOG = logging.getLogger(__name__)
+
+
+def create_configuration():
+ configuration = conf.Configuration(None)
+ configuration.append_config_values(netapp_basicauth_opts)
+ configuration.append_config_values(netapp_eseries_opts)
+ return configuration
+
+
+class FakeEseriesResponse(object):
+ """Fake response to requests."""
+
+ def __init__(self, code=None, text=None):
+ self.status_code = code
+ self.text = text
+
+ def json(self):
+ return json.loads(self.text)
+
+
+class FakeEseriesServerHandler(object):
+ """HTTP handler that fakes enough stuff to allow the driver to run."""
+
+ def do_GET(self, path, params, data, headers):
+ """Respond to a GET request."""
+
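+ # Route the request by regex on the REST path under /devmgr/vn and
+ # return canned JSON resembling E-series web service responses.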
+ response = FakeEseriesResponse()
+ if "/devmgr/vn" not in path:
+ response.status_code = 404
+
+ (__, ___, path) = path.partition("/devmgr/vn")
+ if re.match("^/storage-systems/[0-9a-zA-Z]+/volumes$", path):
+ response.status_code = 200
+ response.text = """[{"extremeProtection": false,
+ "pitBaseVolume": false,
+ "dssMaxSegmentSize": 131072,
+ "totalSizeInBytes": "2126008832", "raidLevel": "raid6",
+ "volumeRef": "0200000060080E500023C73400000AAA52D11677",
+ "listOfMappings": [], "sectorOffset": "6",
+ "id": "0200000060080E500023C73400000AAA52D11677",
+ "wwn": "60080E500023C73400000AAA52D11677",
+ "capacity": "2126008832", "mgmtClientAttribute": 0,
+ "label": "repos_0006", "volumeFull": false,
+ "blkSize": 512, "volumeCopyTarget": false,
+ "volumeGroupRef":
+ "0400000060080E500023BB3400001F9F52CECC3F",
+ "preferredControllerId": "070000000000000000000002",
+ "currentManager": "070000000000000000000002",
+ "applicationTagOwned": true, "status": "optimal",
+ "segmentSize": 131072, "volumeUse":
+ "freeRepositoryVolume", "action": "none",
+ "name": "repos_0006", "worldWideName":
+ "60080E500023C73400000AAA52D11677", "currentControllerId"
+ : "070000000000000000000002",
+ "protectionInformationCapable": false, "mapped": false,
+ "reconPriority": 1, "protectionType": "type0Protection"}
+ ,
+ {"extremeProtection": false, "pitBaseVolume": true,
+ "dssMaxSegmentSize": 131072,
+ "totalSizeInBytes": "2147483648", "raidLevel": "raid6",
+ "volumeRef": "0200000060080E500023BB3400001FC352D14CB2",
+ "listOfMappings": [], "sectorOffset": "15",
+ "id": "0200000060080E500023BB3400001FC352D14CB2",
+ "wwn": "60080E500023BB3400001FC352D14CB2",
+ "capacity": "2147483648", "mgmtClientAttribute": 0,
+ "label": "bdm-vc-test-1", "volumeFull": false,
+ "blkSize": 512, "volumeCopyTarget": false,
+ "volumeGroupRef":
+ "0400000060080E500023BB3400001F9F52CECC3F",
+ "preferredControllerId": "070000000000000000000001",
+ "currentManager": "070000000000000000000001",
+ "applicationTagOwned": false, "status": "optimal",
+ "segmentSize": 131072, "volumeUse": "standardVolume",
+ "action": "none", "preferredManager":
+ "070000000000000000000001", "volumeHandle": 15,
+ "offline": false, "preReadRedundancyCheckEnabled": false,
+ "dssPreallocEnabled": false, "name": "bdm-vc-test-1",
+ "worldWideName": "60080E500023BB3400001FC352D14CB2",
+ "currentControllerId": "070000000000000000000001",
+ "protectionInformationCapable": false, "mapped": false,
+ "reconPriority": 1, "protectionType":
+ "type1Protection"}]"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes/[0-9A-Za-z]+$",
+ path):
+ response.status_code = 200
+ response.text = """{"extremeProtection": false,
+ "pitBaseVolume": true,
+ "dssMaxSegmentSize": 131072,
+ "totalSizeInBytes": "2147483648", "raidLevel": "raid6",
+ "volumeRef": "0200000060080E500023BB3400001FC352D14CB2",
+ "listOfMappings": [], "sectorOffset": "15",
+ "id": "0200000060080E500023BB3400001FC352D14CB2",
+ "wwn": "60080E500023BB3400001FC352D14CB2",
+ "capacity": "2147483648", "mgmtClientAttribute": 0,
+ "label": "bdm-vc-test-1", "volumeFull": false,
+ "blkSize": 512, "volumeCopyTarget": false,
+ "volumeGroupRef":
+ "0400000060080E500023BB3400001F9F52CECC3F",
+ "preferredControllerId": "070000000000000000000001",
+ "currentManager": "070000000000000000000001",
+ "applicationTagOwned": false, "status": "optimal",
+ "segmentSize": 131072, "volumeUse": "standardVolume",
+ "action": "none", "preferredManager":
+ "070000000000000000000001", "volumeHandle": 15,
+ "offline": false, "preReadRedundancyCheckEnabled": false,
+ "dssPreallocEnabled": false, "name": "bdm-vc-test-1",
+ "worldWideName": "60080E500023BB3400001FC352D14CB2",
+ "currentControllerId": "070000000000000000000001",
+ "protectionInformationCapable": false, "mapped": false,
+ "reconPriority": 1, "protectionType":
+ "type1Protection"}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/hardware-inventory$",
+ path):
+ response.status_code = 200
+ response.text = """
+ {"iscsiPorts": [{"controllerId":
+ "070000000000000000000002", "ipv4Enabled": true,
+ "ipv4Data": {"ipv4Address":
+ "0.0.0.0", "ipv4AddressConfigMethod": "configStatic",
+ "ipv4VlanId": {"isEnabled": false, "value": 0},
+ "ipv4AddressData": {"ipv4Address": "172.20.123.66",
+ "ipv4SubnetMask": "255.255.255.0", "configState":
+ "configured", "ipv4GatewayAddress": "0.0.0.0"}},
+ "tcpListenPort": 3260,
+ "interfaceRef": "2202040000000000000000000000000000000000"
+ ,"iqn":
+ "iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323"
+ }]}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/hosts$", path):
+ response.status_code = 200
+ response.text = """[{"isSAControlled": false,
+ "confirmLUNMappingCreation"
+ : false, "label": "stlrx300s7-55", "isLargeBlockFormatHost":
+ false, "clusterRef": "8500000060080E500023C7340036035F515B78FC",
+ "protectionInformationCapableAccessMethod": false,
+ "ports": [], "hostRef":
+ "8400000060080E500023C73400300381515BFBA3", "hostTypeIndex": 6,
+ "hostSidePorts": [{"label": "NewStore", "type": "iscsi",
+ "address": "iqn.1998-01.com.vmware:localhost-28a58148"}]}]"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/host-types$", path):
+ response.status_code = 200
+ response.text = """[{
+ "id" : "4",
+ "code" : "AIX",
+ "name" : "AIX",
+ "index" : 4
+ }, {
+ "id" : "5",
+ "code" : "IRX",
+ "name" : "IRX",
+ "index" : 5
+ }, {
+ "id" : "6",
+ "code" : "LNX",
+ "name" : "Linux",
+ "index" : 6
+ }]"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups$", path):
+ response.status_code = 200
+ response.text = """[]"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images$", path):
+ response.status_code = 200
+ response.text = """[]"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/storage-pools$", path):
+ response.status_code = 200
+ response.text = """[ {"protectionInformationCapabilities":
+ {"protectionInformationCapable": true, "protectionType":
+ "type2Protection"}, "raidLevel": "raidDiskPool", "reserved1":
+ "000000000000000000000000", "reserved2": "", "isInaccessible":
+ false, "label": "DDP", "state": "complete", "usage":
+ "standard", "offline": false, "drawerLossProtection": false,
+ "trayLossProtection": false, "securityType": "capable",
+ "volumeGroupRef": "0400000060080E500023BB3400001F9F52CECC3F",
+ "driveBlockFormat": "__UNDEFINED", "usedSpace": "81604378624",
+ "volumeGroupData": {"type": "diskPool", "diskPoolData":
+ {"criticalReconstructPriority": "highest",
+ "poolUtilizationState": "utilizationOptimal",
+ "reconstructionReservedDriveCountCurrent": 3, "allocGranularity":
+ "4294967296", "degradedReconstructPriority": "high",
+ "backgroundOperationPriority": "low",
+ "reconstructionReservedAmt": "897111293952", "unusableCapacity":
+ "0", "reconstructionReservedDriveCount": 1,
+ "poolUtilizationWarningThreshold": 50,
+ "poolUtilizationCriticalThreshold": 85}}, "spindleSpeed": 10000,
+ "worldWideName": "60080E500023BB3400001F9F52CECC3F",
+ "spindleSpeedMatch": true, "totalRaidedSpace": "17273253317836",
+ "sequenceNum": 2, "protectionInformationCapable": false}]"""
+ elif re.match("^/storage-systems$", path):
+ response.status_code = 200
+ response.text = """[ {"freePoolSpace": 11142431623168,
+ "driveCount": 24,
+ "hostSparesUsed": 0, "id":
+ "1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b",
+ "hotSpareSizeAsString": "0", "wwn":
+ "60080E500023C73400000000515AF323", "parameters":
+ {"minVolSize": 1048576, "maxSnapshotsPerBase": 16,
+ "maxDrives": 192, "maxVolumes": 512, "maxVolumesPerGroup":
+ 256, "maxMirrors": 0, "maxMappingsPerVolume": 1,
+ "maxMappableLuns": 256, "maxVolCopys": 511,
+ "maxSnapshots":
+ 256}, "hotSpareCount": 0, "hostSpareCountInStandby": 0,
+ "status": "needsattn", "trayCount": 1,
+ "usedPoolSpaceAsString": "5313000380416",
+ "ip2": "10.63.165.216", "ip1": "10.63.165.215",
+ "freePoolSpaceAsString": "11142431623168",
+ "types": "SAS",
+ "name": "stle2600-7_8", "hotSpareSize": 0,
+ "usedPoolSpace":
+ 5313000380416, "driveTypes": ["sas"],
+ "unconfiguredSpaceByDriveType": {},
+ "unconfiguredSpaceAsStrings": "0", "model": "2650",
+ "unconfiguredSpace": 0}]"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+$", path):
+ response.status_code = 200
+ response.text = """{"freePoolSpace": 11142431623168,
+ "driveCount": 24,
+ "hostSparesUsed": 0, "id":
+ "1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b",
+ "hotSpareSizeAsString": "0", "wwn":
+ "60080E500023C73400000000515AF323", "parameters":
+ {"minVolSize": 1048576, "maxSnapshotsPerBase": 16,
+ "maxDrives": 192, "maxVolumes": 512, "maxVolumesPerGroup":
+ 256, "maxMirrors": 0, "maxMappingsPerVolume": 1,
+ "maxMappableLuns": 256, "maxVolCopys": 511,
+ "maxSnapshots":
+ 256}, "hotSpareCount": 0, "hostSpareCountInStandby": 0,
+ "status": "needsattn", "trayCount": 1,
+ "usedPoolSpaceAsString": "5313000380416",
+ "ip2": "10.63.165.216", "ip1": "10.63.165.215",
+ "freePoolSpaceAsString": "11142431623168",
+ "types": "SAS",
+ "name": "stle2600-7_8", "hotSpareSize": 0,
+ "usedPoolSpace":
+ 5313000380416, "driveTypes": ["sas"],
+ "unconfiguredSpaceByDriveType": {},
+ "unconfiguredSpaceAsStrings": "0", "model": "2650",
+ "unconfiguredSpace": 0}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs"
+ "/[0-9a-zA-Z]+$", path):
+ response.status_code = 200
+ response.text = """{"status": "complete",
+ "cloneCopy": true, "pgRef":
+ "3300000060080E500023C73400000ACA52D29454", "volcopyHandle":49160
+ , "idleTargetWriteProt": true, "copyPriority": "priority2",
+ "volcopyRef": "1800000060080E500023C73400000ACF52D29466",
+ "worldWideName": "60080E500023C73400000ACF52D29466",
+ "copyCompleteTime": "0", "sourceVolume":
+ "3500000060080E500023C73400000ACE52D29462", "currentManager":
+ "070000000000000000000002", "copyStartTime": "1389551671",
+ "reserved1": "00000000", "targetVolume":
+ "0200000060080E500023C73400000A8C52D10675"}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings$", path):
+ response.status_code = 200
+ response.text = """[
+ {
+ "lunMappingRef":"8800000000000000000000000000000000000000",
+ "lun": 0,
+ "ssid": 16384,
+ "perms": 15,
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "type": "all",
+ "mapRef": "8400000060080E500023C73400300381515BFBA3"
+ }]
+ """
+ else:
+ # Unknown API
+ response.status_code = 500
+
+ return response
+
+ def do_POST(self, path, params, data, headers):
+ """Respond to a POST request."""
+
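+ # As in do_GET, route by regex on the REST path; for create calls the
+ # canned JSON is patched with names/ids taken from the request body so
+ # the driver sees the object it asked for.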
+ response = FakeEseriesResponse()
+ if "/devmgr/vn" not in path:
+ response.status_code = 404
+ data = json.loads(data) if data else None
+ (__, ___, path) = path.partition("/devmgr/vn")
+ if re.match("^/storage-systems/[0-9a-zA-Z]+/volumes$", path):
+ response.status_code = 200
+ text_json = json.loads("""
+ {"extremeProtection": false, "pitBaseVolume": true,
+ "dssMaxSegmentSize": 131072,
+ "totalSizeInBytes": "1073741824", "raidLevel": "raid6",
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "listOfMappings": [], "sectorOffset": "15",
+ "id": "0200000060080E500023BB34000003FB515C2293",
+ "wwn": "60080E500023BB3400001FC352D14CB2",
+ "capacity": "2147483648", "mgmtClientAttribute": 0,
+ "label": "CFDXJ67BLJH25DXCZFZD4NSF54",
+ "volumeFull": false,
+ "blkSize": 512, "volumeCopyTarget": false,
+ "volumeGroupRef":
+ "0400000060080E500023BB3400001F9F52CECC3F",
+ "preferredControllerId": "070000000000000000000001",
+ "currentManager": "070000000000000000000001",
+ "applicationTagOwned": false, "status": "optimal",
+ "segmentSize": 131072, "volumeUse": "standardVolume",
+ "action": "none", "preferredManager":
+ "070000000000000000000001", "volumeHandle": 15,
+ "offline": false, "preReadRedundancyCheckEnabled": false,
+ "dssPreallocEnabled": false, "name": "bdm-vc-test-1",
+ "worldWideName": "60080E500023BB3400001FC352D14CB2",
+ "currentControllerId": "070000000000000000000001",
+ "protectionInformationCapable": false, "mapped": false,
+ "reconPriority": 1, "protectionType":
+ "type1Protection"}""")
+ text_json['label'] = data['name']
+ text_json['name'] = data['name']
+ text_json['volumeRef'] = data['name']
+ text_json['id'] = data['name']
+ response.text = json.dumps(text_json)
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings$", path):
+ response.status_code = 200
+ text_json = json.loads("""
+ {
+ "lunMappingRef":"8800000000000000000000000000000000000000",
+ "lun": 0,
+ "ssid": 16384,
+ "perms": 15,
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "type": "all",
+ "mapRef": "8400000060080E500023C73400300381515BFBA3"
+ }
+ """)
+ text_json['volumeRef'] = data['mappableObjectId']
+ text_json['mapRef'] = data['targetId']
+ response.text = json.dumps(text_json)
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/hosts$", path):
+ response.status_code = 200
+ response.text = """{"isSAControlled": false,
+ "confirmLUNMappingCreation"
+ : false, "label": "stlrx300s7-55", "isLargeBlockFormatHost":
+ false, "clusterRef": "8500000060080E500023C7340036035F515B78FC",
+ "protectionInformationCapableAccessMethod": false,
+ "ports": [], "hostRef":
+ "8400000060080E500023C73400300381515BFBA3", "hostTypeIndex": 10,
+ "hostSidePorts": [{"label": "NewStore", "type": "iscsi",
+ "address": "iqn.1998-01.com.vmware:localhost-28a58148"}]}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups$", path):
+ response.status_code = 200
+ text_json = json.loads("""{"status": "optimal",
+ "autoDeleteLimit": 0,
+ "maxRepositoryCapacity": "-65536", "rollbackStatus": "none"
+ , "unusableRepositoryCapacity": "0", "pitGroupRef":
+ "3300000060080E500023C7340000098D5294AC9A", "clusterSize":
+ 65536, "label": "C6JICISVHNG2TFZX4XB5ZWL7O",
+ "maxBaseCapacity":
+ "476187142128128", "repositoryVolume":
+ "3600000060080E500023BB3400001FA952CEF12C",
+ "fullWarnThreshold": 99, "repFullPolicy": "purgepit",
+ "action": "none", "rollbackPriority": "medium",
+ "creationPendingStatus": "none", "consistencyGroupRef":
+ "0000000000000000000000000000000000000000", "volumeHandle":
+ 49153, "consistencyGroup": false, "baseVolume":
+ "0200000060080E500023C734000009825294A534"}""")
+ text_json['label'] = data['name']
+ text_json['name'] = data['name']
+ text_json['pitGroupRef'] = data['name']
+ text_json['id'] = data['name']
+ text_json['baseVolume'] = data['baseMappableObjectId']
+ response.text = json.dumps(text_json)
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images$", path):
+ response.status_code = 200
+ text_json = json.loads("""{"status": "optimal",
+ "pitCapacity": "2147483648",
+ "pitTimestamp": "1389315375", "pitGroupRef":
+ "3300000060080E500023C7340000098D5294AC9A", "creationMethod":
+ "user", "repositoryCapacityUtilization": "2818048",
+ "activeCOW": true, "isRollbackSource": false, "pitRef":
+ "3400000060080E500023BB3400631F335294A5A8",
+ "pitSequenceNumber": "19"}""")
+ text_json['label'] = data['groupId']
+ text_json['name'] = data['groupId']
+ text_json['id'] = data['groupId']
+ text_json['pitGroupRef'] = data['groupId']
+ response.text = json.dumps(text_json)
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-volumes$",
+ path):
+ response.status_code = 200
+ text_json = json.loads("""{"unusableRepositoryCapacity": "0",
+ "totalSizeInBytes":
+ "-1", "worldWideName": "60080E500023BB3400001FAD52CEF2F5",
+ "boundToPIT": true, "wwn":
+ "60080E500023BB3400001FAD52CEF2F5", "id":
+ "3500000060080E500023BB3400001FAD52CEF2F5",
+ "baseVol": "0200000060080E500023BB3400001FA352CECCAE",
+ "label": "bdm-pv-1", "volumeFull": false,
+ "preferredControllerId": "070000000000000000000001", "offline":
+ false, "viewSequenceNumber": "10", "status": "optimal",
+ "viewRef": "3500000060080E500023BB3400001FAD52CEF2F5",
+ "mapped": false, "accessMode": "readOnly", "viewTime":
+ "1389315613", "repositoryVolume":
+ "0000000000000000000000000000000000000000", "preferredManager":
+ "070000000000000000000001", "volumeHandle": 16385,
+ "currentManager": "070000000000000000000001",
+ "maxRepositoryCapacity": "0", "name": "bdm-pv-1",
+ "fullWarnThreshold": 0, "currentControllerId":
+ "070000000000000000000001", "basePIT":
+ "3400000060080E500023BB3400631F335294A5A8", "clusterSize":
+ 0, "mgmtClientAttribute": 0}""")
+ text_json['label'] = data['name']
+ text_json['name'] = data['name']
+ text_json['id'] = data['name']
+ text_json['basePIT'] = data['snapshotImageId']
+ text_json['baseVol'] = data['baseMappableObjectId']
+ response.text = json.dumps(text_json)
+ elif re.match("^/storage-systems$", path):
+ response.status_code = 200
+ response.text = """{"freePoolSpace": "17055871480319",
+ "driveCount": 24,
+ "wwn": "60080E500023C73400000000515AF323", "id": "1",
+ "hotSpareSizeAsString": "0", "hostSparesUsed": 0, "types": "",
+ "hostSpareCountInStandby": 0, "status": "optimal", "trayCount":
+ 1, "usedPoolSpaceAsString": "37452115456", "ip2":
+ "10.63.165.216", "ip1": "10.63.165.215",
+ "freePoolSpaceAsString": "17055871480319", "hotSpareCount": 0,
+ "hotSpareSize": "0", "name": "stle2600-7_8", "usedPoolSpace":
+ "37452115456", "driveTypes": ["sas"],
+ "unconfiguredSpaceByDriveType": {}, "unconfiguredSpaceAsStrings":
+ "0", "model": "2650", "unconfiguredSpace": "0"}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+$",
+ path):
+ response.status_code = 200
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs$",
+ path):
+ response.status_code = 200
+ response.text = """{"status": "complete", "cloneCopy": true,
+ "pgRef":
+ "3300000060080E500023C73400000ACA52D29454", "volcopyHandle":49160
+ , "idleTargetWriteProt": true, "copyPriority": "priority2",
+ "volcopyRef": "1800000060080E500023C73400000ACF52D29466",
+ "worldWideName": "60080E500023C73400000ACF52D29466",
+ "copyCompleteTime": "0", "sourceVolume":
+ "3500000060080E500023C73400000ACE52D29462", "currentManager":
+ "070000000000000000000002", "copyStartTime": "1389551671",
+ "reserved1": "00000000", "targetVolume":
+ "0200000060080E500023C73400000A8C52D10675"}"""
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes/[0-9A-Za-z]+$",
+ path):
+ response.status_code = 200
+ response.text = """{"extremeProtection": false,
+ "pitBaseVolume": true,
+ "dssMaxSegmentSize": 131072,
+ "totalSizeInBytes": "1073741824", "raidLevel": "raid6",
+ "volumeRef": "0200000060080E500023BB34000003FB515C2293",
+ "listOfMappings": [], "sectorOffset": "15",
+ "id": "0200000060080E500023BB34000003FB515C2293",
+ "wwn": "60080E500023BB3400001FC352D14CB2",
+ "capacity": "2147483648", "mgmtClientAttribute": 0,
+ "label": "rename",
+ "volumeFull": false,
+ "blkSize": 512, "volumeCopyTarget": false,
+ "volumeGroupRef":
+ "0400000060080E500023BB3400001F9F52CECC3F",
+ "preferredControllerId": "070000000000000000000001",
+ "currentManager": "070000000000000000000001",
+ "applicationTagOwned": false, "status": "optimal",
+ "segmentSize": 131072, "volumeUse": "standardVolume",
+ "action": "none", "preferredManager":
+ "070000000000000000000001", "volumeHandle": 15,
+ "offline": false, "preReadRedundancyCheckEnabled": false,
+ "dssPreallocEnabled": false, "name": "bdm-vc-test-1",
+ "worldWideName": "60080E500023BB3400001FC352D14CB2",
+ "currentControllerId": "070000000000000000000001",
+ "protectionInformationCapable": false, "mapped": false,
+ "reconPriority": 1, "protectionType":
+ "type1Protection"}"""
+ else:
+ # Unknown API
+ response.status_code = 500
+
+ return response
+
+ def do_DELETE(self, path, params, data, headers):
+ """Respond to a DELETE request."""
+
+ response = FakeEseriesResponse()
+ if "/devmgr/vn" not in path:
+ response.status_code = 500
+
+ (__, ___, path) = path.partition("/devmgr/vn")
+ if re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images"
+ "/[0-9A-Za-z]+$", path):
+ code = 204
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups"
+ "/[0-9A-Za-z]+$", path):
+ code = 204
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-volumes"
+ "/[0-9A-Za-z]+$", path):
+ code = 204
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs"
+ "/[0-9A-Za-z]+$", path):
+ code = 204
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes"
+ "/[0-9A-Za-z]+$", path):
+ code = 204
+ elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings/"
+ "[0-9a-zA-Z]+$", path):
+ code = 204
+ else:
+ code = 500
+
+ response.status_code = code
+ return response
+
+
+class FakeEseriesHTTPSession(object):
+ """A fake requests.Session for netapp tests.
+ """
+ def __init__(self):
+ self.handler = FakeEseriesServerHandler()
+
+ def request(self, method, url, params, data, headers, timeout, verify):
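+ # Strip the fake host:port prefix from the url and dispatch to the
+ # handler based on the HTTP method.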
+ address = '127.0.0.1:80'
+ (__, ___, path) = url.partition(address)
+ if method.upper() == 'GET':
+ return self.handler.do_GET(path, params, data, headers)
+ elif method.upper() == 'POST':
+ return self.handler.do_POST(path, params, data, headers)
+ elif method.upper() == 'DELETE':
+ return self.handler.do_DELETE(path, params, data, headers)
+ else:
+ raise exception.Invalid()
+
+
+class NetAppEseriesIscsiDriverTestCase(test.TestCase):
+ """Test case for NetApp e-series iscsi driver."""
+
+ volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
+ 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'provider_auth': 'provider a b', 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None}
+ snapshot = {'id': '17928122-553b-4da9-9737-e5c3dcd97f75',
+ 'volume_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
+ 'size': 2, 'volume_name': 'lun1',
+ 'volume_size': 2, 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None}
+ volume_sec = {'id': 'b6c01641-8955-4917-a5e3-077147478575',
+ 'size': 2, 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'provider_auth': None, 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None}
+ volume_clone = {'id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', 'size': 3,
+ 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'cl_sm',
+ 'provider_auth': None,
+ 'project_id': 'project', 'display_name': None,
+ 'display_description': 'lun1',
+ 'volume_type_id': None}
+ volume_clone_large = {'id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553',
+ 'size': 6, 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'cl_lg',
+ 'provider_auth': None,
+ 'project_id': 'project', 'display_name': None,
+ 'display_description': 'lun1',
+ 'volume_type_id': None}
+ connector = {'initiator': 'iqn.1998-01.com.vmware:localhost-28a58148'}
+
+ def setUp(self):
+ super(NetAppEseriesIscsiDriverTestCase, self).setUp()
+ self._custom_setup()
+
+ def _custom_setup(self):
+ configuration = self._set_config(create_configuration())
+ self.driver = common.NetAppDriver(configuration=configuration)
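+ # Patch requests.Session globally so the driver's REST client talks to
+ # the fake E-series handler instead of a real web service.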
+ requests.Session = mock.Mock(wraps=FakeEseriesHTTPSession)
+ self.driver.do_setup(context='context')
+ self.driver.check_for_setup_error()
+
+ def _set_config(self, configuration):
+ configuration.netapp_storage_family = 'eseries'
+ configuration.netapp_storage_protocol = 'iscsi'
+ configuration.netapp_transport_type = 'http'
+ configuration.netapp_server_hostname = '127.0.0.1'
+ configuration.netapp_server_port = '80'
+ configuration.netapp_webservice_path = '/devmgr/vn'
+ configuration.netapp_controller_ips = '127.0.0.2,127.0.0.3'
+ configuration.netapp_sa_password = 'pass1234'
+ configuration.netapp_login = 'rw'
+ configuration.netapp_password = 'rw'
+ configuration.netapp_storage_pools = 'DDP'
+ return configuration
+
+ def test_embedded_mode(self):
+ configuration = self._set_config(create_configuration())
+ configuration.netapp_controller_ips = '127.0.0.1,127.0.0.3'
+ driver = common.NetAppDriver(configuration=configuration)
+ driver.do_setup(context='context')
+ self.assertEqual(driver._client.get_system_id(),
+ '1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b')
+
+ def test_check_system_pwd_not_sync(self):
+ def list_system():
+ if not getattr(self, 'test_count', None):
+ self.test_count = 1
+ return {'status': 'passwordoutofsync'}
+ return {'status': 'needsAttention'}
+
+ self.driver._client.list_storage_system = mock.Mock(wraps=list_system)
+ result = self.driver._check_storage_system()
+ self.assertTrue(result)
+
+ def test_connect(self):
+ self.driver.check_for_setup_error()
+
+ def test_create_destroy(self):
+ self.driver.create_volume(self.volume)
+ self.driver.delete_volume(self.volume)
+
+ def test_create_vol_snapshot_destroy(self):
+ self.driver.create_volume(self.volume)
+ self.driver.create_snapshot(self.snapshot)
+ self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
+ self.driver.delete_snapshot(self.snapshot)
+ self.driver.delete_volume(self.volume)
+
+ def test_map_unmap(self):
+ self.driver.create_volume(self.volume)
+ connection_info = self.driver.initialize_connection(self.volume,
+ self.connector)
+ self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
+ properties = connection_info.get('data')
+ self.assertIsNotNone(properties, 'Connection properties are none')
+ self.driver.terminate_connection(self.volume, self.connector)
+ self.driver.delete_volume(self.volume)
+
+ def test_cloned_volume_destroy(self):
+ self.driver.create_volume(self.volume)
+ self.driver.create_cloned_volume(self.snapshot, self.volume)
+ self.driver.delete_volume(self.volume)
+
+ def test_map_by_creating_host(self):
+ self.driver.create_volume(self.volume)
+ connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'}
+ connection_info = self.driver.initialize_connection(self.volume,
+ connector_new)
+ self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
+ properties = connection_info.get('data')
+ self.assertIsNotNone(properties, 'Connection properties are none')
+
+ def test_vol_stats(self):
+ self.driver.get_volume_stats(refresh=True)
+
+ def test_create_vol_snapshot_diff_size_resize(self):
+ self.driver.create_volume(self.volume)
+ self.driver.create_snapshot(self.snapshot)
+ self.driver.create_volume_from_snapshot(
+ self.volume_clone, self.snapshot)
+ self.driver.delete_snapshot(self.snapshot)
+ self.driver.delete_volume(self.volume)
+
+ def test_create_vol_snapshot_diff_size_subclone(self):
+ self.driver.create_volume(self.volume)
+ self.driver.create_snapshot(self.snapshot)
+ self.driver.create_volume_from_snapshot(
+ self.volume_clone_large, self.snapshot)
+ self.driver.delete_snapshot(self.snapshot)
+ self.driver.delete_volume(self.volume)
'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver',
'nfs':
'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver'
+ }, 'eseries':
+ {
+ 'iscsi':
+ 'cinder.volume.drivers.netapp.eseries.iscsi.Driver'
},
}
netapp_family_default =\
{
'ontap_cluster': 'nfs',
- 'ontap_7mode': 'nfs'
+ 'ontap_7mode': 'nfs',
+ 'eseries': 'iscsi'
}
--- /dev/null
+# Copyright (c) 2014 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Client classes for web services.
+"""
+
+import json
+import requests
+import urlparse
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class WebserviceClient(object):
+ """Base client for e-series web services."""
+
+ def __init__(self, scheme, host, port, service_path, username,
+ password, **kwargs):
+ self._validate_params(scheme, host, port)
+ self._create_endpoint(scheme, host, port, service_path)
+ self._username = username
+ self._password = password
+ self._init_connection()
+
+ def _validate_params(self, scheme, host, port):
+ """Does some basic validation for web service params."""
+ if host is None or port is None or scheme is None:
+ msg = _("One of the required inputs from host, port"
+ " or scheme not found.")
+ raise exception.InvalidInput(reason=msg)
+ if scheme not in ('http', 'https'):
+ raise exception.InvalidInput(reason=_("Invalid transport type."))
+
+ def _create_endpoint(self, scheme, host, port, service_path):
+ """Creates end point url for the service."""
+ netloc = '%s:%s' % (host, port)
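+ # e.g. ('http', '127.0.0.1:80', '/devmgr/vn') -> 'http://127.0.0.1:80/devmgr/vn'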
+ self._endpoint = urlparse.urlunparse((scheme, netloc, service_path,
+ None, None, None))
+
+ def _init_connection(self):
+ """Do client specific set up for session and connection pooling."""
+ self.conn = requests.Session()
+ if self._username and self._password:
+ self.conn.auth = (self._username, self._password)
+
+ def invoke_service(self, method='GET', url=None, params=None, data=None,
+ headers=None, timeout=None, verify=False):
+ url = url or self._endpoint
+ try:
+ response = self.conn.request(method, url, params, data,
+ headers=headers, timeout=timeout,
+ verify=verify)
+ # Catch any unexpected error so that only known driver
+ # exceptions propagate back to the caller.
+ except Exception as e:
+ LOG.exception(_("Unexpected error while invoking web service."
+ " Error - %s."), e)
+ raise exception.NetAppDriverException(
+ _("Invoking web service failed."))
+ self._eval_response(response)
+ return response
+
+ def _eval_response(self, response):
+ """Evaluates response before passing result to invoker."""
+ pass
+
+
+class RestClient(WebserviceClient):
+ """REST client specific to e-series storage service."""
+
+ def __init__(self, scheme, host, port, service_path, username,
+ password, **kwargs):
+ super(RestClient, self).__init__(scheme, host, port, service_path,
+ username, password, **kwargs)
+ kwargs = kwargs or {}
+ self._system_id = kwargs.get('system_id')
+ self._content_type = kwargs.get('content_type') or 'json'
+
+ def set_system_id(self, system_id):
+ """Set the storage system id."""
+ self._system_id = system_id
+
+ def get_system_id(self):
+ """Get the storage system id."""
+ return getattr(self, '_system_id', None)
+
+ def _get_resource_url(self, path, use_system=True, **kwargs):
+ """Creates end point url for rest service."""
+ kwargs = kwargs or {}
+ if use_system:
+ if not self._system_id:
+ raise exception.NotFound(_('Storage system id not set.'))
+ kwargs['system-id'] = self._system_id
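+ # Resource paths are str.format templates, e.g.
+ # '/storage-systems/{system-id}/volumes/{object-id}'; placeholders
+ # are filled from kwargs below.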
+ path = path.format(**kwargs)
+ if not self._endpoint.endswith('/'):
+ self._endpoint = '%s/' % self._endpoint
+ return urlparse.urljoin(self._endpoint, path.lstrip('/'))
+
+ def _invoke(self, method, path, data=None, use_system=True,
+ timeout=None, verify=False, **kwargs):
+ """Invokes end point for resource on path."""
+ params = {'m': method, 'p': path, 'd': data, 'sys': use_system,
+ 't': timeout, 'v': verify, 'k': kwargs}
+ LOG.debug(_("Invoking rest with method: %(m)s, path: %(p)s,"
+ " data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
+ " verify: %(v)s, kwargs: %(k)s.") % (params))
+ url = self._get_resource_url(path, use_system, **kwargs)
+ if self._content_type == 'json':
+ headers = {'Accept': 'application/json',
+ 'Content-Type': 'application/json'}
+ data = json.dumps(data) if data else None
+ res = self.invoke_service(method, url, data=data,
+ headers=headers,
+ timeout=timeout, verify=verify)
+ return res.json() if res.text else None
+ else:
+ raise exception.NetAppDriverException(
+ _("Content type not supported."))
+
+ def _eval_response(self, response):
+ """Evaluates response before passing result to invoker."""
+ super(RestClient, self)._eval_response(response)
+ status_code = int(response.status_code)
+ # codes >= 300 are not ok and to be treated as errors
+ if status_code >= 300:
+ # Response code 422 returns error code and message
+ if status_code == 422:
+ msg = _("Response error - %s.") % response.text
+ else:
+ msg = _("Response error code - %s.") % status_code
+ raise exception.NetAppDriverException(msg)
+
+ def create_volume(self, pool, label, size, unit='gb', seg_size=0):
+ """Creates volume on array."""
+ path = "/storage-systems/{system-id}/volumes"
+ data = {'poolId': pool, 'name': label, 'sizeUnit': unit,
+ 'size': int(size), 'segSize': seg_size}
+ return self._invoke('POST', path, data)
+
+ def delete_volume(self, object_id):
+ """Deletes given volume from array."""
+ path = "/storage-systems/{system-id}/volumes/{object-id}"
+ return self._invoke('DELETE', path, **{'object-id': object_id})
+
+ def list_volumes(self):
+ """Lists all volumes in storage array."""
+ path = "/storage-systems/{system-id}/volumes"
+ return self._invoke('GET', path)
+
+ def list_volume(self, object_id):
+ """List given volume from array."""
+ path = "/storage-systems/{system-id}/volumes/{object-id}"
+ return self._invoke('GET', path, **{'object-id': object_id})
+
+ def update_volume(self, object_id, label):
+ """Renames given volume in array."""
+ path = "/storage-systems/{system-id}/volumes/{object-id}"
+ data = {'name': label}
+ return self._invoke('POST', path, data, **{'object-id': object_id})
+
+ def get_volume_mappings(self):
+ """Creates volume mapping on array."""
+ path = "/storage-systems/{system-id}/volume-mappings"
+ return self._invoke('GET', path)
+
+ def create_volume_mapping(self, object_id, target_id, lun):
+ """Creates volume mapping on array."""
+ path = "/storage-systems/{system-id}/volume-mappings"
+ data = {'mappableObjectId': object_id, 'targetId': target_id,
+ 'lun': lun}
+ return self._invoke('POST', path, data)
+
+ def delete_volume_mapping(self, map_object_id):
+ """Deletes given volume mapping from array."""
+ path = "/storage-systems/{system-id}/volume-mappings/{object-id}"
+ return self._invoke('DELETE', path, **{'object-id': map_object_id})
+
+ def list_hardware_inventory(self):
+ """Lists objects in the hardware inventory."""
+ path = "/storage-systems/{system-id}/hardware-inventory"
+ return self._invoke('GET', path)
+
+ def list_hosts(self):
+ """Lists host objects in the system."""
+ path = "/storage-systems/{system-id}/hosts"
+ return self._invoke('GET', path)
+
+ def create_host(self, label, host_type, ports=None, group_id=None):
+ """Creates host on array."""
+ path = "/storage-systems/{system-id}/hosts"
+ data = {'name': label, 'hostType': host_type}
+ if group_id:
+ data['groupId'] = group_id
+ if ports:
+ data['ports'] = ports
+ return self._invoke('POST', path, data)
+
+ def create_host_with_port(self, label, host_type, port_id,
+ port_label, port_type='iscsi', group_id=None):
+ """Creates host on array with given port information."""
+ port = {'type': port_type, 'port': port_id, 'label': port_label}
+ return self.create_host(label, host_type, [port], group_id)
+
+ def list_host_types(self):
+ """Lists host types in storage system."""
+ path = "/storage-systems/{system-id}/host-types"
+ return self._invoke('GET', path)
+
+ def list_snapshot_groups(self):
+ """Lists snapshot groups."""
+ path = "/storage-systems/{system-id}/snapshot-groups"
+ return self._invoke('GET', path)
+
+ def create_snapshot_group(self, label, object_id, storage_pool_id,
+ repo_percent=99, warn_thres=99, auto_del_limit=0,
+ full_policy='failbasewrites'):
+ """Creates snapshot group on array."""
+ path = "/storage-systems/{system-id}/snapshot-groups"
+ data = {'baseMappableObjectId': object_id, 'name': label,
+ 'storagePoolId': storage_pool_id,
+ 'repositoryPercentage': repo_percent,
+ 'warningThreshold': warn_thres,
+ 'autoDeleteLimit': auto_del_limit, 'fullPolicy': full_policy}
+ return self._invoke('POST', path, data)
+
+ def delete_snapshot_group(self, object_id):
+ """Deletes given snapshot group from array."""
+ path = "/storage-systems/{system-id}/snapshot-groups/{object-id}"
+ return self._invoke('DELETE', path, **{'object-id': object_id})
+
+ def create_snapshot_image(self, group_id):
+ """Creates snapshot image in snapshot group."""
+ path = "/storage-systems/{system-id}/snapshot-images"
+ data = {'groupId': group_id}
+ return self._invoke('POST', path, data)
+
+ def delete_snapshot_image(self, object_id):
+ """Deletes given snapshot image in snapshot group."""
+ path = "/storage-systems/{system-id}/snapshot-images/{object-id}"
+ return self._invoke('DELETE', path, **{'object-id': object_id})
+
+ def list_snapshot_images(self):
+ """Lists snapshot images."""
+ path = "/storage-systems/{system-id}/snapshot-images"
+ return self._invoke('GET', path)
+
+ def create_snapshot_volume(self, image_id, label, base_object_id,
+ storage_pool_id,
+ repo_percent=99, full_thres=99,
+ view_mode='readOnly'):
+ """Creates snapshot volume."""
+ path = "/storage-systems/{system-id}/snapshot-volumes"
+ data = {'snapshotImageId': image_id, 'fullThreshold': full_thres,
+ 'storagePoolId': storage_pool_id,
+ 'name': label, 'viewMode': view_mode,
+ 'repositoryPercentage': repo_percent,
+ 'baseMappableObjectId': base_object_id,
+ 'repositoryPoolId': storage_pool_id}
+ return self._invoke('POST', path, data)
+
+ def delete_snapshot_volume(self, object_id):
+ """Deletes given snapshot volume."""
+ path = "/storage-systems/{system-id}/snapshot-volumes/{object-id}"
+ return self._invoke('DELETE', path, **{'object-id': object_id})
+
+ def list_storage_pools(self):
+ """Lists storage pools in the array."""
+ path = "/storage-systems/{system-id}/storage-pools"
+ return self._invoke('GET', path)
+
+ def list_storage_systems(self):
+ """Lists managed storage systems registered with web service."""
+ path = "/storage-systems"
+ return self._invoke('GET', path, use_system=False)
+
+ def list_storage_system(self):
+ """List current storage system registered with web service."""
+ path = "/storage-systems/{system-id}"
+ return self._invoke('GET', path)
+
+ def register_storage_system(self, controller_addresses, password=None,
+ wwn=None):
+ """Registers storage system with web service."""
+ path = "/storage-systems"
+ data = {'controllerAddresses': controller_addresses}
+ if wwn:
+ data['wwn'] = wwn
+ if password:
+ data['password'] = password
+ return self._invoke('POST', path, data, use_system=False)
+
+ def update_stored_system_password(self, password):
+ """Update array password stored on web service."""
+ path = "/storage-systems/{system-id}"
+ data = {'storedPassword': password}
+ return self._invoke('POST', path, data)
+
+ def create_volume_copy_job(self, src_id, tgt_id, priority='priority4',
+ tgt_wrt_protected='true'):
+ """Creates a volume copy job."""
+ path = "/storage-systems/{system-id}/volume-copy-jobs"
+ data = {'sourceId': src_id, 'targetId': tgt_id,
+ 'copyPriority': priority,
+ 'targetWriteProtected': tgt_wrt_protected}
+ return self._invoke('POST', path, data)
+
+ def control_volume_copy_job(self, obj_id, control='start'):
+ """Controls a volume copy job."""
+ path = ("/storage-systems/{system-id}/volume-copy-jobs-control"
+ "/{object-id}?control={String}")
+ return self._invoke('PUT', path, **{'object-id': obj_id,
+ 'String': control})
+
+ def list_vol_copy_job(self, object_id):
+ """List volume copy job."""
+ path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}"
+ return self._invoke('GET', path, **{'object-id': object_id})
+
+ def delete_vol_copy_job(self, object_id):
+ """Delete volume copy job."""
+ path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}"
+ return self._invoke('DELETE', path, **{'object-id': object_id})
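+
+
+# Illustrative usage sketch (values mirror the test configuration in this
+# patch; the methods are the ones defined above, the flow itself is only an
+# example and not exercised by the driver directly):
+#
+#   rest = RestClient('http', '127.0.0.1', '80', '/devmgr/vn', 'rw', 'rw')
+#   system = rest.list_storage_systems()[0]
+#   rest.set_system_id(system['id'])
+#   pool = rest.list_storage_pools()[0]
+#   vol = rest.create_volume(pool['volumeGroupRef'], 'vol-label', 1)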
--- /dev/null
+# Copyright (c) 2014 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+iSCSI driver for NetApp E-series storage systems.
+"""
+
+import socket
+import time
+import uuid
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder import units
+from cinder.volume import driver
+from cinder.volume.drivers.netapp.eseries import client
+from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
+from cinder.volume.drivers.netapp.options import netapp_connection_opts
+from cinder.volume.drivers.netapp.options import netapp_eseries_opts
+from cinder.volume.drivers.netapp.options import netapp_transport_opts
+from cinder.volume.drivers.netapp import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+CONF = cfg.CONF
+CONF.register_opts(netapp_basicauth_opts)
+CONF.register_opts(netapp_connection_opts)
+CONF.register_opts(netapp_eseries_opts)
+CONF.register_opts(netapp_transport_opts)
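+
+
+# Illustrative cinder.conf snippet for this driver (values mirror the test
+# configuration in this patch):
+#
+#   volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
+#   netapp_storage_family = eseries
+#   netapp_storage_protocol = iscsi
+#   netapp_server_hostname = 127.0.0.1
+#   netapp_server_port = 80
+#   netapp_webservice_path = /devmgr/vn
+#   netapp_controller_ips = 127.0.0.2,127.0.0.3
+#   netapp_login = rw
+#   netapp_password = rw
+#   netapp_sa_password = pass1234
+#   netapp_storage_pools = DDP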
+
+
+class Driver(driver.ISCSIDriver):
+ """Executes commands relating to Volumes."""
+
+ VERSION = "1.0.0"
+ required_flags = ['netapp_server_hostname', 'netapp_controller_ips',
+ 'netapp_login', 'netapp_password',
+ 'netapp_storage_pools']
+ SLEEP_SECS = 5
+ MAX_LUNS_PER_HOST = 255
+
+ def __init__(self, *args, **kwargs):
+ super(Driver, self).__init__(*args, **kwargs)
+ utils.validate_instantiation(**kwargs)
+ self.configuration.append_config_values(netapp_basicauth_opts)
+ self.configuration.append_config_values(netapp_connection_opts)
+ self.configuration.append_config_values(netapp_transport_opts)
+ self.configuration.append_config_values(netapp_eseries_opts)
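+ # Simple in-memory cache: allowed disk pool refs plus volumes and
+ # snapshot groups indexed both by E-series label and by object ref.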
+ self._objects = {'disk_pool_refs': [],
+ 'volumes': {'label_ref': {}, 'ref_vol': {}},
+ 'snapshots': {'label_ref': {}, 'ref_snap': {}}}
+
+ def do_setup(self, context):
+ """Any initialization the volume driver does while starting."""
+ self._check_flags()
+ self._client = client.RestClient(
+ scheme=self.configuration.netapp_transport_type,
+ host=self.configuration.netapp_server_hostname,
+ port=self.configuration.netapp_server_port,
+ service_path=self.configuration.netapp_webservice_path,
+ username=self.configuration.netapp_login,
+ password=self.configuration.netapp_password)
+ self._check_mode_get_or_register_storage_system()
+
+ def _check_flags(self):
+ """Ensure that the flags we care about are set."""
+ required_flags = self.required_flags
+ for flag in required_flags:
+ if not getattr(self.configuration, flag, None):
+ msg = _('%s is not set.') % flag
+ raise exception.InvalidInput(reason=msg)
+
+ def check_for_setup_error(self):
+ self._check_storage_system()
+ self._populate_system_objects()
+
+ def _check_mode_get_or_register_storage_system(self):
+ """Does validity checks for storage system registry and health."""
+ def _resolve_host(host):
+ try:
+ ip = utils.resolve_hostname(host)
+ return ip
+ except socket.gaierror as e:
+ LOG.error(_('Error resolving host %(host)s. Error - %(e)s.')
+ % {'host': host, 'e': e})
+ return None
+
+ ips = self.configuration.netapp_controller_ips
+ ips = [i.strip() for i in ips.split(",")]
+ ips = [x for x in ips if _resolve_host(x)]
+ host = utils.resolve_hostname(
+ self.configuration.netapp_server_hostname)
+ if not ips:
+ msg = _('Controller ips not valid after resolution.')
+ raise exception.NoValidHost(reason=msg)
+ if host in ips:
+ LOG.info(_('Embedded mode detected.'))
+ system = self._client.list_storage_systems()[0]
+ else:
+ LOG.info(_('Proxy mode detected.'))
+ system = self._client.register_storage_system(
+ ips, password=self.configuration.netapp_sa_password)
+ self._client.set_system_id(system.get('id'))
+
+ def _check_storage_system(self):
+ """Checks whether system is registered and has good status."""
+ try:
+ system = self._client.list_storage_system()
+ except exception.NetAppDriverException:
+ with excutils.save_and_reraise_exception():
+ msg = _("System with controller addresses [%s] is not"
+ " registered with web service.")
+ LOG.info(msg % self.configuration.netapp_controller_ips)
+ password_not_in_sync = False
+ if system.get('status', '').lower() == 'passwordoutofsync':
+ password_not_in_sync = True
+ new_pwd = self.configuration.netapp_sa_password
+ self._client.update_stored_system_password(new_pwd)
+ time.sleep(self.SLEEP_SECS)
+ sa_comm_timeout = 60
+ comm_time = 0
+ while True:
+ system = self._client.list_storage_system()
+ status = system.get('status', '').lower()
+ # wait if array not contacted or
+ # password was not in sync previously.
+ if ((status == 'nevercontacted') or
+ (password_not_in_sync and status == 'passwordoutofsync')):
+ LOG.info(_('Waiting for web service array communication.'))
+ time.sleep(self.SLEEP_SECS)
+ comm_time = comm_time + self.SLEEP_SECS
+ if comm_time >= sa_comm_timeout:
+ msg = _("Failure in communication between web service and"
+ " array. Waited %s seconds. Verify array"
+ " configuration parameters.")
+ raise exception.NetAppDriverException(msg %
+ sa_comm_timeout)
+ else:
+ break
+ msg_dict = {'id': system.get('id'), 'status': status}
+ if (status == 'passwordoutofsync' or status == 'notsupported' or
+ status == 'offline'):
+ msg = _("System %(id)s found with bad status - %(status)s.")
+ raise exception.NetAppDriverException(msg % msg_dict)
+ LOG.info(_("System %(id)s has %(status)s status.") % msg_dict)
+ return True
+
+ def _populate_system_objects(self):
+ """Get all system objects into cache."""
+ self._cache_allowed_disk_pool_refs()
+ for vol in self._client.list_volumes():
+ self._cache_volume(vol)
+ for sn in self._client.list_snapshot_groups():
+ self._cache_snap_grp(sn)
+ for image in self._client.list_snapshot_images():
+ self._cache_snap_img(image)
+
+ def _cache_allowed_disk_pool_refs(self):
+ """Caches disk pools refs as per pools configured by user."""
+ d_pools = self.configuration.netapp_storage_pools
+ LOG.info(_('Configured storage pools %s.'), d_pools)
+ pools = [x.strip().lower() if x else None for x in d_pools.split(',')]
+ for pool in self._client.list_storage_pools():
+ if (pool.get('raidLevel') == 'raidDiskPool'
+ and pool['label'].lower() in pools):
+ self._objects['disk_pool_refs'].append(pool['volumeGroupRef'])
+
+ def _cache_volume(self, obj):
+ """Caches volumes for further reference."""
+ if (obj.get('volumeUse') == 'standardVolume' and obj.get('label')
+ and obj.get('volumeRef')):
+ self._objects['volumes']['label_ref'][obj['label']]\
+ = obj['volumeRef']
+ self._objects['volumes']['ref_vol'][obj['volumeRef']] = obj
+
+ def _cache_snap_grp(self, obj):
+ """Caches snapshot groups."""
+ if (obj.get('label') and obj.get('pitGroupRef') and
+ obj.get('baseVolume') in self._objects['volumes']['ref_vol']):
+ self._objects['snapshots']['label_ref'][obj['label']] =\
+ obj['pitGroupRef']
+ self._objects['snapshots']['ref_snap'][obj['pitGroupRef']] = obj
+
+ def _cache_snap_img(self, image):
+ """Caches snapshot image under corresponding snapshot group."""
+ group_id = image.get('pitGroupRef')
+ sn_gp = self._objects['snapshots']['ref_snap']
+ if group_id in sn_gp:
+ sn_gp[group_id]['images'] = sn_gp[group_id].get('images') or []
+ sn_gp[group_id]['images'].append(image)
+
+ def _cache_vol_mapping(self, mapping):
+ """Caches volume mapping in volume object."""
+ vol_id = mapping['volumeRef']
+ volume = self._objects['volumes']['ref_vol'][vol_id]
+ volume['listOfMappings'] = volume.get('listOfMappings') or []
+ volume['listOfMappings'].append(mapping)
+
+ def _del_volume_frm_cache(self, label):
+ """Deletes volume from cache."""
+ vol_id = self._objects['volumes']['label_ref'].get(label)
+ if vol_id:
+ self._objects['volumes']['ref_vol'].pop(vol_id, True)
+ self._objects['volumes']['label_ref'].pop(label)
+ else:
+ LOG.debug(_("Volume %s not cached."), label)
+
+ def _del_snapshot_frm_cache(self, obj_name):
+ """Deletes snapshot group from cache."""
+ snap_id = self._objects['snapshots']['label_ref'].get(obj_name)
+ if snap_id:
+ self._objects['snapshots']['ref_snap'].pop(snap_id, True)
+ self._objects['snapshots']['label_ref'].pop(obj_name)
+ else:
+ LOG.debug(_("Snapshot %s not cached."), obj_name)
+
+ def _del_vol_mapping_frm_cache(self, mapping):
+ """Deletes volume mapping under cached volume."""
+ vol_id = mapping['volumeRef']
+ volume = self._objects['volumes']['ref_vol'].get(vol_id) or {}
+ mappings = volume.get('listOfMappings') or []
+ try:
+ mappings.remove(mapping)
+ except ValueError:
+ LOG.debug(_("Mapping with id %s already removed."),
+ mapping['lunMappingRef'])
+
+ def _get_volume(self, uid):
+ label = utils.convert_uuid_to_es_fmt(uid)
+ try:
+ return self._get_cached_volume(label)
+ except KeyError:
+ for vol in self._client.list_volumes():
+ if vol.get('label') == label:
+ self._cache_volume(vol)
+ break
+ return self._get_cached_volume(label)
+
+ def _get_cached_volume(self, label):
+ vol_id = self._objects['volumes']['label_ref'][label]
+ return self._objects['volumes']['ref_vol'][vol_id]
+
+ def _get_cached_snapshot_grp(self, uid):
+ label = utils.convert_uuid_to_es_fmt(uid)
+ snap_id = self._objects['snapshots']['label_ref'][label]
+ return self._objects['snapshots']['ref_snap'][snap_id]
+
+ def _get_cached_snap_grp_image(self, uid):
+ group = self._get_cached_snapshot_grp(uid)
+ images = group.get('images')
+ if images:
+ sorted_imgs = sorted(images, key=lambda x: x['pitTimestamp'])
+ return sorted_imgs[0]
+ msg = _("No pit image found in snapshot group %s.") % group['label']
+ raise exception.NotFound(msg)
+
+ def _is_volume_containing_snaps(self, label):
+ """Checks if volume contains snapshot groups."""
+ vol_id = self._objects['volumes']['label_ref'].get(label)
+ snp_grps = self._objects['snapshots']['ref_snap'].values()
+ for snap in snp_grps:
+ if snap['baseVolume'] == vol_id:
+ return True
+ return False
+
+ def create_volume(self, volume):
+ """Creates a volume."""
+ label = utils.convert_uuid_to_es_fmt(volume['id'])
+ size_gb = int(volume['size'])
+ vol = self._create_volume(label, size_gb)
+ self._cache_volume(vol)
+
+ def _create_volume(self, label, size_gb):
+ """Creates volume with given label and size."""
+ avl_pools = self._get_sorted_avl_storage_pools(size_gb)
+ for pool in avl_pools:
+ try:
+ vol = self._client.create_volume(pool['volumeGroupRef'],
+ label, size_gb)
+ LOG.info(_("Created volume with label %s."), label)
+ return vol
+ except exception.NetAppDriverException as e:
+ LOG.error(_("Error creating volume. Msg - %s."), e)
+ msg = _("Failure creating volume %s.")
+ raise exception.NetAppDriverException(msg % label)
+
+ def _get_sorted_avl_storage_pools(self, size_gb):
+ """Returns storage pools sorted on available capacity."""
+ size = size_gb * units.GiB
+ pools = self._client.list_storage_pools()
+ sorted_pools = sorted(pools, key=lambda x:
+ (int(x.get('totalRaidedSpace', 0))
+ - int(x.get('usedSpace', 0))), reverse=True)
+ avl_pools = [x for x in sorted_pools
+ if (x['volumeGroupRef'] in
+ self._objects['disk_pool_refs']) and
+ (int(x.get('totalRaidedSpace', 0)) -
+ int(x.get('usedSpace', 0)) >= size)]
+ if not avl_pools:
+ msg = _("No storage pool found with available capacity %s.")
+ raise exception.NotFound(msg % size_gb)
+ return avl_pools
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot."""
+ label = utils.convert_uuid_to_es_fmt(volume['id'])
+ size = volume['size']
+ dst_vol = self._create_volume(label, size)
+ try:
+ src_vol = None
+ src_vol = self._create_snapshot_volume(snapshot['id'])
+ self._copy_volume_high_prior_readonly(src_vol, dst_vol)
+ self._cache_volume(dst_vol)
+ LOG.info(_("Created volume with label %s."), label)
+ except exception.NetAppDriverException:
+ with excutils.save_and_reraise_exception():
+ self._client.delete_volume(dst_vol['volumeRef'])
+ finally:
+ if src_vol:
+ try:
+ self._client.delete_snapshot_volume(src_vol['id'])
+ except exception.NetAppDriverException as e:
+ LOG.error(_("Failure deleting snap vol. Error: %s."), e)
+ else:
+ LOG.warn(_("Snapshot volume not found."))
+
+ def _create_snapshot_volume(self, snapshot_id):
+ """Creates snapshot volume for given group with snapshot_id."""
+ group = self._get_cached_snapshot_grp(snapshot_id)
+ LOG.debug(_("Creating snap vol for group %s"), group['label'])
+ image = self._get_cached_snap_grp_image(snapshot_id)
+ label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
+ capacity = int(image['pitCapacity']) / units.GiB
+ storage_pools = self._get_sorted_avl_storage_pools(capacity)
+ s_id = storage_pools[0]['volumeGroupRef']
+ return self._client.create_snapshot_volume(image['pitRef'], label,
+ group['baseVolume'], s_id)
+
+ def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
+ """Copies src volume to dest volume."""
+ LOG.info(_("Copying src vol %(src)s to dest vol %(dst)s.")
+ % {'src': src_vol['label'], 'dst': dst_vol['label']})
+ try:
+ job = None
+ job = self._client.create_volume_copy_job(src_vol['id'],
+ dst_vol['volumeRef'])
+ while True:
+ j_st = self._client.list_vol_copy_job(job['volcopyRef'])
+ if (j_st['status'] == 'inProgress' or j_st['status'] ==
+ 'pending' or j_st['status'] == 'unknown'):
+ time.sleep(self.SLEEP_SECS)
+ continue
+ if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
+ LOG.error(_("Vol copy job status %s."), j_st['status'])
+ msg = _("Vol copy job for dest %s failed.")\
+ % dst_vol['label']
+ raise exception.NetAppDriverException(msg)
+ LOG.info(_("Vol copy job completed for dest %s.")
+ % dst_vol['label'])
+ break
+ finally:
+ if job:
+ try:
+ self._client.delete_vol_copy_job(job['volcopyRef'])
+ except exception.NetAppDriverException:
+ LOG.warn(_("Failure deleting job %s."), job['volcopyRef'])
+ else:
+ LOG.warn(_('Volume copy job for src vol %s not found.'),
+ src_vol['id'])
+ LOG.info(_('Copy job to dest vol %s completed.'), dst_vol['label'])
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates a clone of the specified volume."""
+ snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id']}
+ self.create_snapshot(snapshot)
+ try:
+ self.create_volume_from_snapshot(volume, snapshot)
+ finally:
+ try:
+ self.delete_snapshot(snapshot)
+ except exception.NetAppDriverException:
+ LOG.warn(_("Failure deleting temp snapshot %s."),
+ snapshot['id'])
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ try:
+ vol = self._get_volume(volume['id'])
+ self._delete_volume(vol['label'])
+ except KeyError:
+ LOG.info(_("Volume %s already deleted."), volume['id'])
+ return
+
+ def _delete_volume(self, label):
+ """Deletes an array volume."""
+ vol_id = self._objects['volumes']['label_ref'].get(label)
+ if vol_id:
+ self._client.delete_volume(vol_id)
+ self._del_volume_frm_cache(label)
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot."""
+ snap_grp, snap_image = None, None
+ snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
+ vol = self._get_volume(snapshot['volume_id'])
+ vol_size_gb = int(vol['totalSizeInBytes']) / units.GiB
+ pools = self._get_sorted_avl_storage_pools(vol_size_gb)
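+ # A snapshot consists of a snapshot group plus a point-in-time image;
+ # the group's repository is placed on the first sorted available pool.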
+ try:
+ snap_grp = self._client.create_snapshot_group(
+ snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
+ self._cache_snap_grp(snap_grp)
+ snap_image = self._client.create_snapshot_image(
+ snap_grp['pitGroupRef'])
+ self._cache_snap_img(snap_image)
+ LOG.info(_("Created snap grp with label %s."), snapshot_name)
+ except exception.NetAppDriverException:
+ with excutils.save_and_reraise_exception():
+ if snap_image is None and snap_grp:
+ self.delete_snapshot(snapshot)
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+ try:
+ snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
+ except KeyError:
+ LOG.warn(_("Snapshot %s already deleted.") % snapshot['id'])
+ return
+ self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
+ snapshot_name = snap_grp['label']
+ self._del_snapshot_frm_cache(snapshot_name)
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a volume."""
+ pass
+
+ def create_export(self, context, volume):
+ """Exports the volume."""
+ pass
+
+ def remove_export(self, context, volume):
+ """Removes an export for a volume."""
+ pass
+
+ def initialize_connection(self, volume, connector):
+ """Allow connection to connector and return connection info."""
+ initiator_name = connector['initiator']
+ vol = self._get_volume(volume['id'])
+ iscsi_det = self._get_iscsi_service_details()
+ mapping = self._map_volume_to_host(vol, initiator_name)
+ lun_id = mapping['lun']
+ self._cache_vol_mapping(mapping)
+ msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
+ msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
+ LOG.debug(msg % msg_fmt)
+ msg = _("Successfully fetched target details for volume %(id)s and "
+ "initiator %(initiator_name)s.")
+ LOG.debug(msg % msg_fmt)
+ properties = {}
+ properties['target_discovered'] = False
+ properties['target_portal'] = '%s:%s' % (iscsi_det['ip'],
+ iscsi_det['tcp_port'])
+ properties['target_iqn'] = iscsi_det['iqn']
+ properties['target_lun'] = lun_id
+ properties['volume_id'] = volume['id']
+ auth = volume['provider_auth']
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+ properties['auth_method'] = auth_method
+ properties['auth_username'] = auth_username
+ properties['auth_password'] = auth_secret
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': properties,
+ }
+
+ def _get_iscsi_service_details(self):
+ """Gets iscsi iqn, ip and port information."""
+ hw_inventory = self._client.list_hardware_inventory()
+ iscsi_ports = hw_inventory.get('iscsiPorts')
+ if iscsi_ports:
+ for port in iscsi_ports:
+ if (port.get('ipv4Enabled') and port.get('iqn') and
+ port.get('ipv4Data') and
+ port['ipv4Data'].get('ipv4AddressData') and
+ port['ipv4Data']['ipv4AddressData']
+ .get('ipv4Address') and port['ipv4Data']
+ ['ipv4AddressData'].get('configState')
+ == 'configured'):
+ iscsi_det = {}
+ iscsi_det['ip'] =\
+ port['ipv4Data']['ipv4AddressData']['ipv4Address']
+ iscsi_det['iqn'] = port['iqn']
+ iscsi_det['tcp_port'] = port.get('tcpListenPort', '3260')
+ return iscsi_det
+ msg = _('No good iscsi portal information found for %s.')
+ raise exception.NetAppDriverException(
+ msg % self._client.get_system_id())
+
+ def _map_volume_to_host(self, vol, initiator):
+ """Maps the e-series volume to host with initiator."""
+ host = self._get_or_create_host(initiator)
+ lun = self._get_free_lun(host)
+ return self._client.create_volume_mapping(vol['volumeRef'],
+ host['hostRef'], lun)
+
+ def _get_or_create_host(self, port_id, host_type='linux'):
+ """Fetch or create a host by given port."""
+ try:
+ return self._get_host_with_port(port_id, host_type)
+ except exception.NotFound as e:
+ LOG.warn(_("Message - %s."), e.msg)
+ return self._create_host(port_id, host_type)
+
+ def _get_host_with_port(self, port_id, host_type='linux'):
+ """Gets or creates a host with given port id."""
+ hosts = self._client.list_hosts()
+ ht_def = self._get_host_type_definition(host_type)
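+ # Match on the host type index and an iscsi host-side port whose
+ # address equals the given initiator port id.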
+ for host in hosts:
+ if (host.get('hostTypeIndex') == ht_def.get('index')
+ and host.get('hostSidePorts')):
+ ports = host.get('hostSidePorts')
+ for port in ports:
+ if (port.get('type') == 'iscsi'
+ and port.get('address') == port_id):
+ return host
+ msg = _("Host with port %(port)s and type %(type)s not found.")
+ raise exception.NotFound(msg % {'port': port_id, 'type': host_type})
+
+ def _create_host(self, port_id, host_type='linux'):
+ """Creates host on system with given initiator as port_id."""
+ LOG.info(_("Creating host with port %s."), port_id)
+ label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
+ port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
+ host_type = self._get_host_type_definition(host_type)
+ return self._client.create_host_with_port(label, host_type,
+ port_id, port_label)
+
+ def _get_host_type_definition(self, host_type='linux'):
+ """Gets supported host type if available on storage system."""
+ host_types = self._client.list_host_types()
+ for ht in host_types:
+ if ht.get('name', 'unknown').lower() == host_type.lower():
+ return ht
+ raise exception.NotFound(_("Host type %s not supported.") % host_type)
+
+ def _get_free_lun(self, host):
+ """Gets free lun for given host."""
+ luns = self._get_vol_mapping_for_host_frm_array(host['hostRef'])
+ used_luns = set(map(lambda lun: int(lun['lun']), luns))
+ for lun in xrange(self.MAX_LUNS_PER_HOST):
+ if lun not in used_luns:
+ return lun
+ msg = _("No free luns. Host might exceeded max luns.")
+ raise exception.NetAppDriverException(msg)
+
+ def _get_vol_mapping_for_host_frm_array(self, host_ref):
+ """Gets all volume mappings for given host from array."""
+ mappings = self._client.get_volume_mappings()
+ host_maps = filter(lambda x: x.get('mapRef') == host_ref, mappings)
+ return host_maps
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Disallow connection from connector."""
+ vol = self._get_volume(volume['id'])
+ host = self._get_host_with_port(connector['initiator'])
+ mapping = self._get_cached_vol_mapping_for_host(vol, host)
+ self._client.delete_volume_mapping(mapping['lunMappingRef'])
+ self._del_vol_mapping_frm_cache(mapping)
+
+ def _get_cached_vol_mapping_for_host(self, volume, host):
+ """Gets cached volume mapping for given host."""
+ mappings = volume.get('listOfMappings') or []
+ for mapping in mappings:
+ if mapping.get('mapRef') == host['hostRef']:
+ return mapping
+ msg = _("Mapping not found for %(vol)s to host %(ht)s.")
+ raise exception.NotFound(msg % {'vol': volume['volumeRef'],
+ 'ht': host['hostRef']})
+
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service."""
+ if refresh:
+ self._update_volume_stats()
+ return self._stats
+
+ def _update_volume_stats(self):
+ """Update volume statistics."""
+ LOG.debug(_("Updating volume stats."))
+ self._stats = self._stats or {}
+ netapp_backend = 'NetApp_ESeries'
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ self._stats["volume_backend_name"] = (
+ backend_name or netapp_backend)
+ self._stats["vendor_name"] = 'NetApp'
+ self._stats["driver_version"] = '1.0'
+ self._stats["storage_protocol"] = 'iSCSI'
+ self._stats["total_capacity_gb"] = 0
+ self._stats["free_capacity_gb"] = 0
+ self._stats["reserved_percentage"] = 0
+ self._stats["QoS_support"] = False
+ self._update_capacity()
+ self._garbage_collect_tmp_vols()
+
+ def _update_capacity(self):
+ """Get free and total appliance capacity in bytes."""
+ tot_bytes, used_bytes = 0, 0
+ pools = self._client.list_storage_pools()
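+ # Only disk pools tracked in self._objects['disk_pool_refs'] contribute
+ # to the reported capacity.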
+ for pool in pools:
+ if pool['volumeGroupRef'] in self._objects['disk_pool_refs']:
+ tot_bytes = tot_bytes + int(pool.get('totalRaidedSpace', 0))
+ used_bytes = used_bytes + int(pool.get('usedSpace', 0))
+ self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.GiB
+ self._stats['total_capacity_gb'] = tot_bytes / units.GiB
+
+ def extend_volume(self, volume, new_size):
+ """Extend an existing volume to the new size."""
+ stage_1, stage_2 = 0, 0
+ src_vol = self._get_volume(volume['id'])
+ src_label = src_vol['label']
+ stage_label = 'tmp-%s' % utils.convert_uuid_to_es_fmt(uuid.uuid4())
+ extend_vol = {'id': uuid.uuid4(), 'size': new_size}
+ self.create_cloned_volume(extend_vol, volume)
+ new_vol = self._get_volume(extend_vol['id'])
+ try:
+ stage_1 = self._client.update_volume(src_vol['id'], stage_label)
+ stage_2 = self._client.update_volume(new_vol['id'], src_label)
+ new_vol = stage_2
+ self._cache_volume(new_vol)
+ self._cache_volume(stage_1)
+ LOG.info(_('Extended volume with label %s.'), src_label)
+ except exception.NetAppDriverException:
+ if stage_1 == 0:
+ with excutils.save_and_reraise_exception():
+ self._client.delete_volume(new_vol['id'])
+ if stage_2 == 0:
+ with excutils.save_and_reraise_exception():
+ self._client.update_volume(src_vol['id'], src_label)
+ self._client.delete_volume(new_vol['id'])
+
+ def _garbage_collect_tmp_vols(self):
+ """Removes tmp vols with no snapshots."""
+ try:
+ if not utils.set_safe_attr(self, 'clean_job_running', True):
+ LOG.warn(_('Returning as a tmp vol cleanup job is already running.'))
+ return
+ for label in self._objects['volumes']['label_ref'].keys():
+ if (label.startswith('tmp-') and
+ not self._is_volume_containing_snaps(label)):
+ try:
+ self._delete_volume(label)
+ except exception.NetAppDriverException:
+ LOG.debug(_("Error deleting vol with label %s."),
+ label)
+ finally:
+ utils.set_safe_attr(self, 'clean_job_running', False)
default='ontap_cluster',
help=('The storage family type used on the storage system; '
'valid values are ontap_7mode for using Data ONTAP '
- 'operating in 7-Mode or ontap_cluster for using '
- 'clustered Data ONTAP.')),
+ 'operating in 7-Mode, ontap_cluster for using '
+ 'clustered Data ONTAP, or eseries for using E-Series.')),
cfg.StrOpt('netapp_storage_protocol',
default=None,
help=('The storage protocol to be used on the data path with '
netapp_connection_opts = [
cfg.StrOpt('netapp_server_hostname',
default=None,
- help='The hostname (or IP address) for the storage system.'),
+ help='The hostname (or IP address) for the storage system or '
+ 'proxy server.'),
cfg.IntOpt('netapp_server_port',
default=80,
- help=('The TCP port to use for communication with ONTAPI on '
- 'the storage system. Traditionally, port 80 is used for '
- 'HTTP and port 443 is used for HTTPS; however, this '
+ help=('The TCP port to use for communication with the storage '
+ 'system or proxy server. Traditionally, port 80 is used '
+ 'for HTTP and port 443 is used for HTTPS; however, this '
'value should be changed if an alternate port has been '
- 'configured on the storage system.')), ]
+ 'configured on the storage system or proxy server.')), ]
netapp_transport_opts = [
cfg.StrOpt('netapp_transport_type',
default='http',
help=('The transport protocol used when communicating with '
- 'ONTAPI on the storage system. Valid values are http '
- 'or https.')), ]
+ 'the storage system or proxy server. Valid values are '
+ 'http or https.')), ]
netapp_basicauth_opts = [
cfg.StrOpt('netapp_login',
default=None,
help=('Administrative user account name used to access the '
- 'storage system.')),
+ 'storage system or proxy server.')),
cfg.StrOpt('netapp_password',
default=None,
help=('Password for the administrative user account '
'the value of this parameter, will be deleted from the '
'cache to create free space on the NFS share.')), ]
+netapp_eseries_opts = [
+ cfg.StrOpt('netapp_webservice_path',
+ default='/devmgr/v2',
+ help=('This option is used to specify the path to the E-Series '
+ 'proxy application on a proxy server. The value is '
+ 'combined with the value of the netapp_transport_type, '
+ 'netapp_server_hostname, and netapp_server_port options '
+ 'to create the URL used by the driver to connect to the '
+ 'proxy application.')),
+ cfg.StrOpt('netapp_controller_ips',
+ default=None,
+ help=('This option is only utilized when the storage family '
+ 'is configured to eseries. This option is used to '
+ 'restrict provisioning to the specified controllers. '
+ 'Specify the value of this option to be a comma '
+ 'separated list of controller hostnames or IP addresses '
+ 'to be used for provisioning.')),
+ cfg.StrOpt('netapp_sa_password',
+ default=None,
+ help=('Password for the NetApp E-Series storage array.'),
+ secret=True),
+ cfg.StrOpt('netapp_storage_pools',
+ default=None,
+ help=('This option is used to restrict provisioning to the '
+ 'specified storage pools. Only dynamic disk pools are '
+ 'currently supported. Specify the value of this option to'
+ ' be a comma separated list of disk pool names to be used'
+ ' for provisioning.')), ]
+
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
CONF.register_opts(netapp_connection_opts)
CONF.register_opts(netapp_7mode_opts)
CONF.register_opts(netapp_provisioning_opts)
CONF.register_opts(netapp_img_cache_opts)
+CONF.register_opts(netapp_eseries_opts)
NetApp drivers to achieve the desired functionality.
"""
+import base64
+import binascii
import copy
import socket
+import uuid
from cinder import context
from cinder import exception
msg = _("Api version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
return failed_apis
+
+
+def resolve_hostname(hostname):
+ """Resolves host name to IP address."""
+ res = socket.getaddrinfo(hostname, None)[0]
+ family, socktype, proto, canonname, sockaddr = res
+ return sockaddr[0]
+
+
+def encode_hex_to_base32(hex_string):
+ """Encodes hex to base32 bit as per RFC4648."""
+ bin_form = binascii.unhexlify(hex_string)
+ return base64.b32encode(bin_form)
+
+
+def decode_base32_to_hex(base32_string):
+ """Decodes base32 string to hex string."""
+ bin_form = base64.b32decode(base32_string)
+ return binascii.hexlify(bin_form)
+
+
+def convert_uuid_to_es_fmt(uuid_str):
+ """Converts uuid to e-series compatible name format."""
+ uuid_base32 = encode_hex_to_base32(uuid.UUID(str(uuid_str)).hex)
+ return uuid_base32.strip('=')
+
+
+def convert_es_fmt_to_uuid(es_label):
+ """Converts e-series name format to uuid."""
+ es_label_b32 = es_label.ljust(32, '=')
+ return uuid.UUID(binascii.hexlify(base64.b32decode(es_label_b32)))
#netapp_vfiler=<None>
# Administrative user account name used to access the storage
-# system. (string value)
+# system or proxy server. (string value)
#netapp_login=<None>
# Password for the administrative user account specified in
# function normally. (string value)
#netapp_vserver=<None>
-# The hostname (or IP address) for the storage system. (string
-# value)
+# The hostname (or IP address) for the storage system or proxy
+# server. (string value)
#netapp_server_hostname=<None>
-# The TCP port to use for communication with ONTAPI on the
-# storage system. Traditionally, port 80 is used for HTTP and
-# port 443 is used for HTTPS; however, this value should be
-# changed if an alternate port has been configured on the
-# storage system. (integer value)
+# The TCP port to use for communication with the storage
+# system or proxy server. Traditionally, port 80 is used for
+# HTTP and port 443 is used for HTTPS; however, this value
+# should be changed if an alternate port has been configured
+# on the storage system or proxy server. (integer value)
#netapp_server_port=80
+# This option is used to specify the path to the E-Series
+# proxy application on a proxy server. The value is combined
+# with the value of the netapp_transport_type,
+# netapp_server_hostname, and netapp_server_port options to
+# create the URL used by the driver to connect to the proxy
+# application. (string value)
+#netapp_webservice_path=/devmgr/v2
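+#
+# For example, with netapp_transport_type=https,
+# netapp_server_hostname=proxy.example.com (an illustrative hostname) and
+# netapp_server_port=8443, the driver would connect to
+# https://proxy.example.com:8443/devmgr/v2.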
+
+# This option is only utilized when the storage family is
+# configured to eseries. This option is used to restrict
+# provisioning to the specified controllers. Specify the value
+# of this option to be a comma separated list of controller
+# hostnames or IP addresses to be used for provisioning.
+# (string value)
+#netapp_controller_ips=<None>
+
+# Password for the NetApp E-Series storage array. (string
+# value)
+#netapp_sa_password=<None>
+
+# This option is used to restrict provisioning to the
+# specified storage pools. Only dynamic disk pools are
+# currently supported. Specify the value of this option to be
+# a comma separated list of disk pool names to be used for
+# provisioning. (string value)
+#netapp_storage_pools=<None>
+
# If the percentage of available space for an NFS share has
# dropped below the value specified by this option, the NFS
# image cache will be cleaned. (integer value)
# The storage family type used on the storage system; valid
# values are ontap_7mode for using Data ONTAP operating in
-# 7-Mode or ontap_cluster for using clustered Data ONTAP.
-# (string value)
+# 7-Mode, ontap_cluster for using clustered Data ONTAP, or
+# eseries for using E-Series. (string value)
#netapp_storage_family=ontap_cluster
# The storage protocol to be used on the data path with the
# value)
#netapp_storage_protocol=<None>
-# The transport protocol used when communicating with ONTAPI
-# on the storage system. Valid values are http or https.
-# (string value)
+# The transport protocol used when communicating with the
+# storage system or proxy server. Valid values are http or
+# https. (string value)
#netapp_transport_type=http