--- /dev/null
+# Copyright (c) 2014 X-IO Technologies.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume.drivers import xio
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
+
+LOG = logging.getLogger("cinder.volume.driver")
+
+ISE_IP1 = '10.12.12.1'
+ISE_IP2 = '10.11.12.2'
+ISE_ISCSI_IP1 = '1.2.3.4'
+ISE_ISCSI_IP2 = '1.2.3.5'
+
+ISE_GID = 'isegid'
+ISE_IQN = ISE_GID
+ISE_WWN1 = ISE_GID + '1'
+ISE_WWN2 = ISE_GID + '2'
+ISE_WWN3 = ISE_GID + '3'
+ISE_WWN4 = ISE_GID + '4'
+ISE_TARGETS = [ISE_WWN1, ISE_WWN2, ISE_WWN3, ISE_WWN4]
+ISE_INIT_TARGET_MAP = {'init_wwn1': ISE_TARGETS,
+ 'init_wwn2': ISE_TARGETS}
+
+VOLUME_SIZE = 10
+NEW_VOLUME_SIZE = 20
+
+VOLUME1 = {'id': '1', 'name': 'volume1',
+ 'size': VOLUME_SIZE, 'volume_type_id': 'type1'}
+
+VOLUME2 = {'id': '2', 'name': 'volume2',
+ 'size': VOLUME_SIZE, 'volume_type_id': 'type2',
+ 'provider_auth': 'CHAP abc abc'}
+
+VOLUME3 = {'id': '3', 'name': 'volume3',
+ 'size': VOLUME_SIZE, 'volume_type_id': None}
+
+SNAPSHOT1 = {'name': 'snapshot1',
+ 'volume_name': VOLUME1['name'],
+ 'volume_type_id': 'type3'}
+
+CLONE1 = {'id': '3', 'name': 'clone1',
+ 'size': VOLUME_SIZE, 'volume_type_id': 'type4'}
+
+HOST1 = 'host1'
+
+HOST2 = 'host2'
+
+ISCSI_CONN1 = {'initiator': 'init_iqn1',
+ 'host': HOST1}
+
+ISCSI_CONN2 = {'initiator': 'init_iqn2',
+ 'host': HOST2}
+
+FC_CONN1 = {'wwpns': ['init_wwn1', 'init_wwn2'],
+ 'host': HOST1}
+
+FC_CONN2 = {'wwpns': ['init_wwn3', 'init_wwn4'],
+ 'host': HOST2}
+
+ISE_HTTP_IP = 'http://' + ISE_IP1
+
+ISE_VOLUME1_LOCATION = '/storage/volumes/volume1'
+ISE_VOLUME1_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME1_LOCATION
+ISE_VOLUME2_LOCATION = '/storage/volumes/volume2'
+ISE_VOLUME2_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME2_LOCATION
+ISE_VOLUME3_LOCATION = '/storage/volumes/volume3'
+ISE_VOLUME3_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME3_LOCATION
+
+ISE_SNAPSHOT_LOCATION = '/storage/volumes/snapshot1'
+ISE_SNAPSHOT_LOCATION_URL = ISE_HTTP_IP + ISE_SNAPSHOT_LOCATION
+
+ISE_CLONE_LOCATION = '/storage/volumes/clone1'
+ISE_CLONE_LOCATION_URL = ISE_HTTP_IP + ISE_CLONE_LOCATION
+
+ISE_ALLOCATION_LOCATION = '/storage/allocations/a1'
+ISE_ALLOCATION_LOCATION_URL = ISE_HTTP_IP + ISE_ALLOCATION_LOCATION
+
+ISE_GET_QUERY_XML =\
+ """<array>
+ <globalid>ABC12345</globalid>
+ <capabilities>
+ <capability value="3" string="Storage" type="source"/>
+ <capability value="49003" string="Volume Affinity"/>
+ <capability value="49004" string="Volume Quality of Service IOPS"/>
+ <capability value="49005" string="Thin Provisioning"/>
+ <capability value="49006" string="Clones" type="source"/>
+ </capabilities>
+ <controllers>
+ <controller>
+ <ipaddress>%s</ipaddress>
+ <rank value="1"/>
+ </controller>
+ <controller>
+ <ipaddress>%s</ipaddress>
+ <rank value="0"/>
+ </controller>
+ </controllers>
+ </array>""" % (ISE_IP1, ISE_IP2)
+
+ISE_GET_QUERY_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_QUERY_XML.split())}
+
+ISE_GET_QUERY_NO_GID_XML =\
+ """<array>
+ <capabilities>
+ <capability value="3" string="Storage" type="source"/>
+ <capability value="49003" string="Volume Affinity"/>
+ <capability value="49004" string="Volume Quality of Service IOPS"/>
+ <capability value="49005" string="Thin Provisioning"/>
+ <capability value="49006" string="Clones" type="source"/>
+ </capabilities>
+ <controllers>
+ <controller>
+ <ipaddress>%s</ipaddress>
+ <rank value="1"/>
+ </controller>
+ <controller>
+ <ipaddress>%s</ipaddress>
+ <rank value="0"/>
+ </controller>
+ </controllers>
+ </array>""" % (ISE_IP1, ISE_IP2)
+
+ISE_GET_QUERY_NO_GID_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_QUERY_NO_GID_XML.split())}
+
+ISE_GET_QUERY_NO_CLONE_XML =\
+ """<array>
+ <globalid>ABC12345</globalid>
+ <capabilities>
+ <capability value="3" string="Storage" type="source"/>
+ <capability value="49003" string="Volume Affinity"/>
+ <capability value="49004" string="Volume Quality of Service IOPS"/>
+ <capability value="49005" string="Thin Provisioning"/>
+ </capabilities>
+ <controllers>
+ <controller>
+ <ipaddress>%s</ipaddress>
+ <rank value="1"/>
+ </controller>
+ <controller>
+ <ipaddress>%s</ipaddress>
+ <rank value="0"/>
+ </controller>
+ </controllers>
+ </array>""" % (ISE_IP1, ISE_IP2)
+
+ISE_GET_QUERY_NO_CLONE_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_QUERY_NO_CLONE_XML.split())}
+
+ISE_GET_STORAGE_POOLS_XML =\
+ """
+ <pools>
+ <pool>
+ <name>Pool 1</name>
+ <id>1</id>
+ <status value="0" string="Operational">
+ <details value="0x00000000">
+ <detail>None</detail>
+ </details>
+ </status>
+ <available total="60">
+ <byredundancy>
+ <raid-0>60</raid-0>
+ <raid-1>30</raid-1>
+ <raid-5>45</raid-5>
+ </byredundancy>
+ </available>
+ <used total="40">
+ <byredundancy>
+ <raid-0>0</raid-0>
+ <raid-1>40</raid-1>
+ <raid-5>0</raid-5>
+ </byredundancy>
+ </used>
+ <media>
+ <medium>
+ <health>100</health>
+ <tier value="4" string="Hybrid"/>
+ </medium>
+ </media>
+ <volumes>
+ <volume>
+ <globalid>volgid</globalid>
+ </volume>
+ <volume>
+ <globalid>volgid2</globalid>
+ </volume>
+ </volumes>
+ </pool>
+ </pools>
+ """
+
+ISE_GET_STORAGE_POOLS_RESP =\
+ {'status': 200,
+ 'location': 'Pool location',
+ 'content': " ".join(ISE_GET_STORAGE_POOLS_XML.split())}
+
+ISE_GET_VOL_STATUS_NO_VOL_NODE_XML =\
+ """<volumes></volumes>"""
+
+ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
+ 'content': " ".join(ISE_GET_VOL_STATUS_NO_VOL_NODE_XML.split())}
+
+ISE_GET_VOL_STATUS_NO_STATUS_XML =\
+ """<volumes>
+ <volume self="%s">
+ </volume>
+ </volumes>""" % (ISE_VOLUME1_LOCATION_URL)
+
+ISE_GET_VOL_STATUS_NO_STATUS_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
+ 'content': " ".join(ISE_GET_VOL_STATUS_NO_STATUS_XML.split())}
+
+ISE_GET_VOL1_STATUS_XML =\
+ """<volumes>
+ <volume self="%s">
+ <status value="0" string="Operational">
+ <details>
+ <detail>Prepared</detail>
+ </details>
+ </status>
+ <size>10</size>
+ </volume>
+ </volumes>""" % (ISE_VOLUME1_LOCATION_URL)
+
+ISE_GET_VOL1_STATUS_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
+ 'content': " ".join(ISE_GET_VOL1_STATUS_XML.split())}
+
+ISE_GET_VOL2_STATUS_XML =\
+ """<volumes>
+ <volume self="%s">
+ <status value="0" string="Operational">
+ <details>
+ <detail>Prepared</detail>
+ </details>
+ </status>
+ </volume>
+ </volumes>""" % (ISE_VOLUME2_LOCATION_URL)
+
+ISE_GET_VOL2_STATUS_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_VOLUME2_LOCATION_URL,
+ 'content': " ".join(ISE_GET_VOL2_STATUS_XML.split())}
+
+ISE_GET_VOL3_STATUS_XML =\
+ """<volumes>
+ <volume self="%s">
+ <status value="0" string="Operational">
+ <details>
+ <detail>Prepared</detail>
+ </details>
+ </status>
+ </volume>
+ </volumes>""" % (ISE_VOLUME3_LOCATION_URL)
+
+ISE_GET_VOL3_STATUS_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_VOLUME3_LOCATION_URL,
+ 'content': " ".join(ISE_GET_VOL3_STATUS_XML.split())}
+
+ISE_GET_SNAP1_STATUS_XML =\
+ """<volumes>
+ <volume self="%s">
+ <status value="0" string="Operational">
+ <details>
+ <detail>Prepared</detail>
+ </details>
+ </status>
+ </volume>
+ </volumes>""" % (ISE_SNAPSHOT_LOCATION_URL)
+
+ISE_GET_SNAP1_STATUS_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_SNAPSHOT_LOCATION_URL,
+ 'content': " ".join(ISE_GET_SNAP1_STATUS_XML.split())}
+
+ISE_GET_CLONE1_STATUS_XML =\
+ """<volumes>
+ <volume self="%s">
+ <status value="0" string="Operational">
+ <details>
+ <detail>Prepared</detail>
+ </details>
+ </status>
+ </volume>
+ </volumes>""" % (ISE_CLONE_LOCATION_URL)
+
+ISE_GET_CLONE1_STATUS_RESP =\
+ {'status': 200,
+ 'location': 'u%s' % ISE_CLONE_LOCATION_URL,
+ 'content': " ".join(ISE_GET_CLONE1_STATUS_XML.split())}
+
+ISE_CREATE_VOLUME_XML = """<volume/>"""
+
+ISE_CREATE_VOLUME_RESP =\
+ {'status': 201,
+ 'location': ISE_VOLUME1_LOCATION_URL,
+ 'content': " ".join(ISE_CREATE_VOLUME_XML.split())}
+
+ISE_GET_IONETWORKS_XML =\
+ """<chap>
+ <chapin value="0" string="disabled">
+ <username/>
+ <password/>
+ </chapin>
+ <chapout value="0" string="disabled">
+ <username/>
+ <password/>
+ </chapout>
+ </chap>"""
+
+ISE_GET_IONETWORKS_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_IONETWORKS_XML.split())}
+
+ISE_GET_IONETWORKS_CHAP_XML =\
+ """<chap>
+ <chapin value="1" string="disabled">
+ <username>abc</username>
+ <password>abc</password>
+ </chapin>
+ <chapout value="0" string="disabled">
+ <username/>
+ <password/>
+ </chapout>
+ </chap>"""
+
+ISE_GET_IONETWORKS_CHAP_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_IONETWORKS_CHAP_XML.split())}
+
+ISE_DELETE_VOLUME_XML = """<volumes/>"""
+
+ISE_DELETE_VOLUME_RESP =\
+ {'status': 204,
+ 'location': '',
+ 'content': " ".join(ISE_DELETE_VOLUME_XML.split())}
+
+ISE_GET_ALLOC_WITH_EP_XML =\
+ """<allocations>
+ <allocation self="%s">
+ <volume>
+ <volumename>%s</volumename>
+ </volume>
+ <endpoints>
+ <hostname>%s</hostname>
+ </endpoints>
+ <lun>1</lun>
+ </allocation>
+ </allocations>""" %\
+ (ISE_ALLOCATION_LOCATION_URL, VOLUME1['name'], HOST1)
+
+ISE_GET_ALLOC_WITH_EP_RESP =\
+ {'status': 200,
+ 'location': ISE_ALLOCATION_LOCATION_URL,
+ 'content': " ".join(ISE_GET_ALLOC_WITH_EP_XML.split())}
+
+ISE_GET_ALLOC_WITH_NO_ALLOC_XML =\
+ """<allocations self="%s"/>""" % ISE_ALLOCATION_LOCATION_URL
+
+ISE_GET_ALLOC_WITH_NO_ALLOC_RESP =\
+ {'status': 200,
+ 'location': ISE_ALLOCATION_LOCATION_URL,
+ 'content': " ".join(ISE_GET_ALLOC_WITH_NO_ALLOC_XML.split())}
+
+ISE_DELETE_ALLOC_XML = """<allocations/>"""
+
+ISE_DELETE_ALLOC_RESP =\
+ {'status': 204,
+ 'location': '',
+ 'content': " ".join(ISE_DELETE_ALLOC_XML.split())}
+
+ISE_GET_HOSTS_NOHOST_XML =\
+ """<hosts self="http://ip/storage/hosts"/>"""
+
+ISE_GET_HOSTS_NOHOST_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_HOSTS_NOHOST_XML.split())}
+
+ISE_GET_HOSTS_HOST1_XML =\
+ """<hosts self="http://ip/storage/hosts">
+ <host self="http://ip/storage/hosts/1">
+ <name>%s</name>
+ <id>1</id>
+ <endpoints self="http://ip/storage/endpoints">
+ <endpoint self="http://ip/storage/endpoints/ep1">
+ <globalid>init_wwn1</globalid>
+ </endpoint>
+ <endpoint self="http://ip/storage/endpoints/ep2">
+ <globalid>init_wwn2</globalid>
+ </endpoint>
+ <endpoint self="http://ip/storage/endpoints/ep1">
+ <globalid>init_iqn1</globalid>
+ </endpoint>
+ </endpoints>
+ </host>
+ </hosts>""" % HOST1
+
+ISE_GET_HOSTS_HOST1_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_HOSTS_HOST1_XML.split())}
+
+ISE_GET_HOSTS_HOST2_XML =\
+ """<hosts self="http://ip/storage/hosts">
+ <host self="http://ip/storage/hosts/2">
+ <name>%s</name>
+ <id>2</id>
+ <endpoints self="http://ip/storage/endpoints">
+ <endpoint self="http://ip/storage/endpoints/ep3">
+ <globalid>init_wwn3</globalid>
+ </endpoint>
+ <endpoint self="http://ip/storage/endpoints/ep4">
+ <globalid>init_wwn4</globalid>
+ </endpoint>
+ <endpoint self="http://ip/storage/endpoints/ep3">
+ <globalid>init_iqn2</globalid>
+ </endpoint>
+ </endpoints>
+ </host>
+ </hosts>""" % HOST2
+
+ISE_GET_HOSTS_HOST2_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_HOSTS_HOST2_XML.split())}
+
+ISE_CREATE_HOST_XML =\
+ """<hosts self="http://ip/storage/hosts"/>"""
+
+ISE_CREATE_HOST_RESP =\
+ {'status': 201,
+ 'location': 'http://ip/storage/hosts/host1',
+ 'content': " ".join(ISE_CREATE_HOST_XML.split())}
+
+ISE_CREATE_ALLOC_XML =\
+ """<allocations self="http://ip/storage/allocations"/>"""
+
+ISE_CREATE_ALLOC_RESP =\
+ {'status': 201,
+ 'location': ISE_ALLOCATION_LOCATION_URL,
+ 'content': " ".join(ISE_CREATE_ALLOC_XML.split())}
+
+ISE_GET_ENDPOINTS_XML =\
+ """<endpoints self="http://ip/storage/endpoints">
+ <endpoint type="array" self="http://ip/storage/endpoints/isegid">
+ <globalid>isegid</globalid>
+ <protocol>iSCSI</protocol>
+ <array self="http://ip/storage/arrays/ise1">
+ <globalid>ise1</globalid>
+ </array>
+ <host/>
+ <allocations self="http://ip/storage/allocations">
+ <allocation self="%s">
+ <globalid>
+ a1
+ </globalid>
+ </allocation>
+ </allocations>
+ </endpoint>
+ <endpoint type="array" self="http://ip/storage/endpoints/isegid">
+ <globalid>isegid</globalid>
+ <protocol>Fibre Channel</protocol>
+ <array self="http://ip/storage/arrays/ise1">
+ <globalid>ise1</globalid>
+ </array>
+ <host/>
+ <allocations self="http://ip/storage/allocations">
+ <allocation self="%s">
+ <globalid>
+ a1
+ </globalid>
+ </allocation>
+ </allocations>
+ </endpoint>
+ </endpoints>""" % (ISE_ALLOCATION_LOCATION_URL,
+ ISE_ALLOCATION_LOCATION_URL)
+
+ISE_GET_ENDPOINTS_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_ENDPOINTS_XML.split())}
+
+ISE_GET_CONTROLLERS_XML =\
+ """<controllers self="http://ip/storage/arrays/controllers">
+ <controller>
+ <status/>
+ <ioports>
+ <ioport>
+ <ipaddresses>
+ <ipaddress>%s</ipaddress>
+ </ipaddresses>
+ <endpoint>
+ <globalid>isegid</globalid>
+ </endpoint>
+ </ioport>
+ </ioports>
+ <fcports>
+ <fcport>
+ <wwn>%s</wwn>
+ </fcport>
+ <fcport>
+ <wwn>%s</wwn>
+ </fcport>
+ </fcports>
+ </controller>
+ <controller>
+ <status/>
+ <ioports>
+ <ioport>
+ <ipaddresses>
+ <ipaddress>%s</ipaddress>
+ </ipaddresses>
+ <endpoint>
+ <globalid>isegid</globalid>
+ </endpoint>
+ </ioport>
+ </ioports>
+ <fcports>
+ <fcport>
+ <wwn>%s</wwn>
+ </fcport>
+ <fcport>
+ <wwn>%s</wwn>
+ </fcport>
+ </fcports>
+ </controller>
+ </controllers>""" % (ISE_ISCSI_IP1, ISE_WWN1, ISE_WWN2,
+ ISE_ISCSI_IP2, ISE_WWN3, ISE_WWN4)
+
+ISE_GET_CONTROLLERS_RESP =\
+ {'status': 200,
+ 'location': '',
+ 'content': " ".join(ISE_GET_CONTROLLERS_XML.split())}
+
+ISE_CREATE_SNAPSHOT_XML = """<snapshot/>"""
+
+ISE_CREATE_SNAPSHOT_RESP =\
+ {'status': 201,
+ 'location': ISE_SNAPSHOT_LOCATION_URL,
+ 'content': " ".join(ISE_CREATE_SNAPSHOT_XML.split())}
+
+ISE_PREP_SNAPSHOT_XML = """<snapshot/>"""
+
+ISE_PREP_SNAPSHOT_RESP =\
+ {'status': 202,
+ 'location': ISE_SNAPSHOT_LOCATION_URL,
+ 'content': " ".join(ISE_PREP_SNAPSHOT_XML.split())}
+
+ISE_MODIFY_VOLUME_XML = """<volume/>"""
+
+ISE_MODIFY_VOLUME_RESP =\
+ {'status': 201,
+ 'location': ISE_VOLUME1_LOCATION_URL,
+ 'content': " ".join(ISE_MODIFY_VOLUME_XML.split())}
+
+ISE_BAD_CONNECTION_RESP =\
+ {'status': 0,
+ 'location': '',
+ 'content': " "}
+
+ISE_400_RESP =\
+ {'status': 400,
+ 'location': '',
+ 'content': ""}
+
+ISE_GET_VOL_STATUS_404_XML = \
+ """<response value="404" index="3">VOLUME not found.</response>"""
+
+ISE_GET_VOL_STATUS_404_RESP =\
+ {'status': 404,
+ 'location': '',
+ 'content': " ".join(ISE_GET_VOL_STATUS_404_XML.split())}
+
+ISE_400_INVALID_STATE_XML = \
+ """<response value="400">Not in a valid state.</response>"""
+
+ISE_400_INVALID_STATE_RESP =\
+ {'status': 400,
+ 'location': '',
+ 'content': " ".join(ISE_400_INVALID_STATE_XML.split())}
+
+ISE_409_CONFLICT_XML = \
+ """<response value="409">Conflict</response>"""
+
+ISE_409_CONFLICT_RESP =\
+ {'status': 409,
+ 'location': '',
+ 'content': " ".join(ISE_409_CONFLICT_XML.split())}
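+
+# Each ISE_*_RESP dict above mimics the dict returned by
+# XIOISEDriver._opener: 'status' holds the HTTP status code (0 means the
+# connection itself failed), 'location' the Content-Location header, and
+# 'content' the response body. The tests feed these dicts to the mocked
+# _opener through mock_req.side_effect.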
+
+
+DRIVER = "cinder.volume.drivers.xio.XIOISEDriver"
+
+
+@mock.patch(DRIVER + "._opener", autospec=True)
+class XIOISEDriverTestCase(object):
+
+ # Test cases for X-IO volume driver
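+    # The class-level mock.patch above replaces XIOISEDriver._opener for
+    # every test method; each test queues the ordered REST responses it
+    # expects through mock_req.side_effect.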
+
+ def setUp(self):
+ super(XIOISEDriverTestCase, self).setUp()
+
+ # set good default values
+ self.configuration = mock.Mock()
+ self.configuration.san_ip = ISE_IP1
+ self.configuration.san_user = 'fakeuser'
+ self.configuration.san_password = 'fakepass'
+ self.configuration.iscsi_ip_address = ISE_ISCSI_IP1
+ self.configuration.driver_use_ssl = False
+ self.configuration.ise_completion_retries = 30
+ self.configuration.ise_connection_retries = 5
+ self.configuration.ise_retry_interval = 1
+ self.configuration.volume_backend_name = 'ise1'
+ self.driver = None
+ self.protocol = ''
+ self.connector = None
+ self.connection_failures = 0
+ self.hostgid = ''
+ self.use_response_table = 1
+
+ def setup_test(self, protocol):
+ self.protocol = protocol
+
+ # set good default values
+ if self.protocol == 'iscsi':
+ self.configuration.ise_protocol = protocol
+ self.connector = ISCSI_CONN1
+ self.hostgid = self.connector['initiator']
+ elif self.protocol == 'fibre_channel':
+ self.configuration.ise_protocol = protocol
+ self.connector = FC_CONN1
+ self.hostgid = self.connector['wwpns'][0]
+
+ def setup_driver(self):
+        # Set up the driver object with previously set configuration values.
+ if self.configuration.ise_protocol == 'iscsi':
+ self.driver =\
+ xio.XIOISEISCSIDriver(configuration=self.configuration)
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ self.driver =\
+ xio.XIOISEFCDriver(configuration=self.configuration)
+ elif self.configuration.ise_protocol == 'test_prot':
+ # if test_prot specified override with correct protocol
+ # used to bypass protocol specific driver
+ self.configuration.ise_protocol = self.protocol
+ self.driver = xio.XIOISEDriver(configuration=self.configuration)
+ else:
+ # Invalid protocol type
+ raise exception.Invalid()
+
+#################################
+## UNIT TESTS ##
+#################################
+ def test_do_setup(self, mock_req):
+ self.setup_driver()
+        mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
+        self.driver.do_setup(None)
+
+ def test_negative_do_setup_no_clone_support(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_NO_CLONE_RESP])
+ self.assertRaises(exception.XIODriverException,
+ self.driver.do_setup, None)
+
+ def test_negative_do_setup_bad_globalid_none(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_NO_GID_RESP])
+ self.assertRaises(exception.XIODriverException,
+ self.driver.do_setup, None)
+
+ def test_check_for_setup_error(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
+ self.setup_driver()
+ self.driver.check_for_setup_error()
+
+ def test_negative_do_setup_bad_ip(self, mock_req):
+ # set san_ip to bad value
+ self.configuration.san_ip = ''
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
+ self.setup_driver()
+ self.assertRaises(exception.XIODriverException,
+ self.driver.check_for_setup_error)
+
+ def test_negative_do_setup_bad_user_blank(self, mock_req):
+        # set san_login to bad value
+ self.configuration.san_login = ''
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
+ self.setup_driver()
+ self.assertRaises(exception.XIODriverException,
+ self.driver.check_for_setup_error)
+
+ def test_negative_do_setup_bad_password_blank(self, mock_req):
+ # set san_password to bad value
+ self.configuration.san_password = ''
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
+ self.setup_driver()
+ self.assertRaises(exception.XIODriverException,
+ self.driver.check_for_setup_error)
+
+ def test_get_volume_stats(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_STORAGE_POOLS_RESP])
+
+ backend_name = self.configuration.volume_backend_name
+ if self.configuration.ise_protocol == 'iscsi':
+ protocol = 'iSCSI'
+ else:
+ protocol = 'fibre_channel'
+        exp_result = {'vendor_name': "X-IO",
+ 'driver_version': "1.1.0",
+ 'volume_backend_name': backend_name,
+ 'reserved_percentage': 0,
+ 'total_capacity_gb': 100,
+ 'free_capacity_gb': 60,
+ 'QoS_support': True,
+ 'affinity': True,
+ 'thin': False,
+ 'pools': [{'pool_ise_name': "Pool 1",
+ 'pool_name': "1",
+ 'status': "Operational",
+ 'status_details': "None",
+ 'free_capacity_gb': 60,
+ 'free_capacity_gb_raid_0': 60,
+ 'free_capacity_gb_raid_1': 30,
+ 'free_capacity_gb_raid_5': 45,
+ 'allocated_capacity_gb': 40,
+ 'allocated_capacity_gb_raid_0': 0,
+ 'allocated_capacity_gb_raid_1': 40,
+ 'allocated_capacity_gb_raid_5': 0,
+ 'health': 100,
+ 'media': "Hybrid",
+ 'total_capacity_gb': 100,
+ 'QoS_support': True,
+ 'reserved_percentage': 0}],
+ 'active_volumes': 2,
+ 'storage_protocol': protocol}
+
+ act_result = self.driver.get_volume_stats(True)
+ self.assertDictMatch(exp_result, act_result)
+
+ def test_get_volume_stats_ssl(self, mock_req):
+ self.configuration.driver_use_ssl = True
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_STORAGE_POOLS_RESP])
+ self.driver.get_volume_stats(True)
+
+ def test_negative_get_volume_stats_bad_primary(self, mock_req):
+ self.configuration.ise_connection_retries = 1
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_BAD_CONNECTION_RESP,
+ ISE_GET_STORAGE_POOLS_RESP])
+ self.driver.get_volume_stats(True)
+
+ def test_create_volume(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ VOLUME1['volume_type_id'] = type_ref['id']
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_CREATE_VOLUME_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_GET_IONETWORKS_RESP])
+            exp_result = {"provider_auth": ""}
+ act_result = self.driver.create_volume(VOLUME1)
+ self.assertDictMatch(exp_result, act_result)
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_CREATE_VOLUME_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.driver.create_volume(VOLUME1)
+
+ def test_create_volume_chap(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ VOLUME1['volume_type_id'] = type_ref['id']
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_CREATE_VOLUME_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_GET_IONETWORKS_CHAP_RESP])
+            exp_result = {"provider_auth": "CHAP abc abc"}
+ act_result = self.driver.create_volume(VOLUME1)
+ self.assertDictMatch(exp_result, act_result)
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_CREATE_VOLUME_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.driver.create_volume(VOLUME1)
+
+ def test_create_volume_type_none(self, mock_req):
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_CREATE_VOLUME_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_GET_IONETWORKS_RESP])
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_CREATE_VOLUME_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.driver.create_volume(VOLUME3)
+
+ def test_delete_volume(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_DELETE_ALLOC_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_DELETE_VOLUME_RESP])
+ self.setup_driver()
+ self.driver.delete_volume(VOLUME1)
+
+    def test_delete_volume_non_existing(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_DELETE_ALLOC_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.setup_driver()
+ self.driver.delete_volume(VOLUME2)
+
+ def test_initialize_connection_positive(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST2_RESP,
+ ISE_CREATE_HOST_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_CREATE_ALLOC_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_GET_CONTROLLERS_RESP])
+ self.setup_driver()
+
+ exp_result = {}
+ if self.configuration.ise_protocol == 'iscsi':
+ exp_result = {"driver_volume_type": "iscsi",
+ "data": {"target_lun": '1',
+ "volume_id": '1',
+ "access_mode": 'rw',
+ "target_discovered": False,
+ "target_iqn": ISE_IQN,
+ "target_portal": ISE_ISCSI_IP1 + ":3260"}}
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ exp_result = {"driver_volume_type": "fibre_channel",
+ "data": {"target_lun": '1',
+ "volume_id": '1',
+ "access_mode": 'rw',
+ "target_discovered": True,
+ "initiator_target_map": ISE_INIT_TARGET_MAP,
+ "target_wwn": ISE_TARGETS}}
+
+ act_result =\
+ self.driver.initialize_connection(VOLUME1, self.connector)
+ self.assertDictMatch(exp_result, act_result)
+
+ def test_initialize_connection_positive_chap(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST2_RESP,
+ ISE_CREATE_HOST_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_CREATE_ALLOC_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_GET_CONTROLLERS_RESP])
+ self.setup_driver()
+ exp_result = {}
+ if self.configuration.ise_protocol == 'iscsi':
+ exp_result = {"driver_volume_type": "iscsi",
+ "data": {"target_lun": '1',
+ "volume_id": '2',
+ "access_mode": 'rw',
+ "target_discovered": False,
+ "target_iqn": ISE_IQN,
+ "target_portal": ISE_ISCSI_IP1 + ":3260",
+ 'auth_method': 'CHAP',
+ 'auth_username': 'abc',
+ 'auth_password': 'abc'}}
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ exp_result = {"driver_volume_type": "fibre_channel",
+ "data": {"target_lun": '1',
+ "volume_id": '2',
+ "access_mode": 'rw',
+ "target_discovered": True,
+ "initiator_target_map": ISE_INIT_TARGET_MAP,
+ "target_wwn": ISE_TARGETS}}
+
+ act_result =\
+ self.driver.initialize_connection(VOLUME2, self.connector)
+ self.assertDictMatch(exp_result, act_result)
+
+ def test_terminate_connection_positive(self, mock_req):
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_DELETE_ALLOC_RESP])
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_DELETE_ALLOC_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_GET_CONTROLLERS_RESP])
+ self.driver.terminate_connection(VOLUME1, self.connector)
+
+ def test_terminate_connection_positive_noalloc(self, mock_req):
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_GET_ALLOC_WITH_NO_ALLOC_RESP])
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
+ ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
+ ISE_GET_CONTROLLERS_RESP])
+ self.driver.terminate_connection(VOLUME1, self.connector)
+
+ def test_negative_terminate_connection_bad_host(self, mock_req):
+ self.setup_driver()
+ test_connector = {}
+ if self.configuration.ise_protocol == 'iscsi':
+ test_connector['initiator'] = 'bad_iqn'
+ test_connector['host'] = ''
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST1_RESP])
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ test_connector['wwpns'] = 'bad_wwn'
+ test_connector['host'] = ''
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_HOSTS_HOST1_RESP,
+ ISE_GET_CONTROLLERS_RESP])
+
+ self.driver.terminate_connection(VOLUME1, test_connector)
+
+ def test_create_snapshot(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ SNAPSHOT1['volume_type_id'] = type_ref['id']
+
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_PREP_SNAPSHOT_RESP,
+ ISE_GET_SNAP1_STATUS_RESP,
+ ISE_CREATE_SNAPSHOT_RESP,
+ ISE_GET_SNAP1_STATUS_RESP])
+ self.setup_driver()
+ self.driver.create_snapshot(SNAPSHOT1)
+
+ def test_negative_create_snapshot_invalid_state_recover(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ SNAPSHOT1['volume_type_id'] = type_ref['id']
+
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_400_INVALID_STATE_RESP,
+ ISE_PREP_SNAPSHOT_RESP,
+ ISE_GET_SNAP1_STATUS_RESP,
+ ISE_CREATE_SNAPSHOT_RESP,
+ ISE_GET_SNAP1_STATUS_RESP])
+ self.setup_driver()
+ self.driver.create_snapshot(SNAPSHOT1)
+
+ def test_negative_create_snapshot_invalid_state_norecover(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ SNAPSHOT1['volume_type_id'] = type_ref['id']
+
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_400_INVALID_STATE_RESP])
+ self.configuration.ise_completion_retries = 1
+ self.setup_driver()
+ self.assertRaises(exception.XIODriverException,
+ self.driver.create_snapshot, SNAPSHOT1)
+
+ def test_negative_create_snapshot_conflict(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ SNAPSHOT1['volume_type_id'] = type_ref['id']
+
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_409_CONFLICT_RESP])
+ self.configuration.ise_completion_retries = 1
+ self.setup_driver()
+ self.assertRaises(exception.XIODriverException,
+ self.driver.create_snapshot, SNAPSHOT1)
+
+ def test_delete_snapshot(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_ALLOC_WITH_EP_RESP,
+ ISE_DELETE_ALLOC_RESP,
+ ISE_GET_SNAP1_STATUS_RESP,
+ ISE_DELETE_VOLUME_RESP])
+ self.setup_driver()
+ self.driver.delete_snapshot(SNAPSHOT1)
+
+ def test_clone_volume(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ VOLUME1['volume_type_id'] = type_ref['id']
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_PREP_SNAPSHOT_RESP,
+ ISE_GET_SNAP1_STATUS_RESP,
+ ISE_CREATE_SNAPSHOT_RESP,
+ ISE_GET_SNAP1_STATUS_RESP])
+ self.setup_driver()
+ self.driver.create_cloned_volume(CLONE1, VOLUME1)
+
+ def test_extend_volume(self, mock_req):
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_MODIFY_VOLUME_RESP])
+ self.setup_driver()
+ self.driver.extend_volume(VOLUME1, NEW_VOLUME_SIZE)
+
+ def test_retype_volume(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ VOLUME1['volume_type_id'] = type_ref['id']
+ # New volume type
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "5",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT2', extra_specs)
+ specs = {'qos:minIOPS': '30',
+ 'qos:maxIOPS': '3000',
+ 'qos:burstIOPS': '10000'}
+ qos = qos_specs.create(ctxt, 'fake-qos2', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_MODIFY_VOLUME_RESP])
+ self.setup_driver()
+ self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0)
+
+ def test_create_volume_from_snapshot(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ SNAPSHOT1['volume_type_id'] = type_ref['id']
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_SNAP1_STATUS_RESP,
+ ISE_PREP_SNAPSHOT_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_CREATE_SNAPSHOT_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.setup_driver()
+ self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1)
+
+ def test_manage_existing(self, mock_req):
+ ctxt = context.get_admin_context()
+ extra_specs = {"Feature:Pool": "1",
+ "Feature:Raid": "1",
+ "Affinity:Type": "flash",
+ "Alloc:Type": "thick"}
+ type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
+ specs = {'qos:minIOPS': '20',
+ 'qos:maxIOPS': '2000',
+ 'qos:burstIOPS': '5000'}
+ qos = qos_specs.create(ctxt, 'fake-qos', specs)
+ qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
+ VOLUME1['volume_type_id'] = type_ref['id']
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_MODIFY_VOLUME_RESP,
+ ISE_GET_IONETWORKS_RESP])
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_MODIFY_VOLUME_RESP])
+ self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'})
+
+ def test_manage_existing_no_source_name(self, mock_req):
+ self.setup_driver()
+ if self.configuration.ise_protocol == 'iscsi':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_MODIFY_VOLUME_RESP,
+ ISE_GET_IONETWORKS_RESP])
+ elif self.configuration.ise_protocol == 'fibre_channel':
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP,
+ ISE_MODIFY_VOLUME_RESP])
+ self.assertRaises(exception.XIODriverException,
+ self.driver.manage_existing, VOLUME1, {})
+
+ def test_manage_existing_get_size(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ exp_result = 10
+ act_result = \
+ self.driver.manage_existing_get_size(VOLUME1,
+ {'source-name': 'a'})
+ self.assertEqual(exp_result, act_result)
+
+ def test_manage_existing_get_size_no_source_name(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.assertRaises(exception.XIODriverException,
+ self.driver.manage_existing_get_size, VOLUME1, {})
+
+ def test_unmanage(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL1_STATUS_RESP])
+ self.driver.unmanage(VOLUME1)
+
+ def test_negative_unmanage_no_volume_status_xml(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL_STATUS_NO_STATUS_RESP])
+ self.driver.unmanage(VOLUME1)
+
+ def test_negative_unmanage_no_volume_xml(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP])
+ self.assertRaises(exception.XIODriverException,
+ self.driver.unmanage, VOLUME1)
+
+ def test_negative_unmanage_non_existing_volume(self, mock_req):
+ self.setup_driver()
+ mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
+ ISE_GET_VOL_STATUS_404_RESP])
+ self.assertRaises(exception.XIODriverException,
+ self.driver.unmanage, VOLUME1)
+
+
+class XIOISEISCSIDriverTestCase(XIOISEDriverTestCase, test.TestCase):
+
+ def setUp(self):
+ super(XIOISEISCSIDriverTestCase, self).setUp()
+ self.setup_test('iscsi')
+
+
+class XIOISEFCDriverTestCase(XIOISEDriverTestCase, test.TestCase):
+
+ def setUp(self):
+ super(XIOISEFCDriverTestCase, self).setUp()
+ self.setup_test('fibre_channel')
--- /dev/null
+# Copyright (c) 2014 X-IO.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import string
+import urllib2
+
+from lxml import etree
+from oslo.config import cfg
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _LE, _LI, _LW
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.volume import driver
+from cinder.volume.drivers.san import san
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
+from cinder.zonemanager import utils as fczm_utils
+
+XIO_OPTS = [
+ cfg.IntOpt('ise_storage_pool', default=1,
+ help='Default storage pool for volumes.'),
+ cfg.IntOpt('ise_raid', default=1,
+ help='Raid level for ISE volumes.'),
+ cfg.IntOpt('ise_connection_retries', default=5,
+ help='Number of retries (per port) when establishing '
+ 'connection to ISE management port.'),
+ cfg.IntOpt('ise_retry_interval', default=1,
+ help='Interval (secs) between retries.'),
+ cfg.IntOpt('ise_completion_retries', default=30,
+ help='Number on retries to get completion status after '
+ 'issuing a command to ISE.'),
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(XIO_OPTS)
+
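+# Illustrative cinder.conf backend stanza (example values only; the ise_*
+# options above plus the standard san_* and iscsi_ip_address options are
+# what the driver reads):
+#
+#     [ise1]
+#     volume_driver = cinder.volume.drivers.xio.XIOISEISCSIDriver
+#     san_ip = 10.12.12.1
+#     san_login = fakeuser
+#     san_password = fakepass
+#     iscsi_ip_address = 1.2.3.4
+#     ise_connection_retries = 5
+#     ise_retry_interval = 1
+#     ise_completion_retries = 30
+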
+LOG = logging.getLogger(__name__)
+
+OPERATIONAL_STATUS = 'OPERATIONAL'
+PREPARED_STATUS = 'PREPARED'
+INVALID_STATUS = 'VALID'
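+# Note: INVALID_STATUS is intentionally the bare substring 'VALID' so that
+# it matches the upper-cased "Not in a valid state" text the ISE returns
+# (see _help_call_method below).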
+
+
+# Raise exception for X-IO driver
+def RaiseXIODriverException():
+ raise exception.XIODriverException()
+
+
+class XIOISEDriver(object):
+
+ VERSION = '1.1.0'
+
+ # Version Changes
+ # 1.0.0 Base driver
+ # 1.1.0 QoS, affinity, retype and thin support
+
+ def __init__(self, *args, **kwargs):
+ super(XIOISEDriver, self).__init__()
+ LOG.debug("XIOISEDriver __init__ called.")
+ self.configuration = kwargs.get('configuration', None)
+ self.ise_primary_ip = ''
+ self.ise_secondary_ip = ''
+ self.newquery = 1
+ self.ise_globalid = None
+ self._vol_stats = {}
+
+ def do_setup(self, context):
+ LOG.debug("XIOISEDriver do_setup called.")
+ self._get_ise_globalid()
+
+ def check_for_setup_error(self):
+ LOG.debug("XIOISEDriver check_for_setup_error called.")
+ # The san_ip must always be set
+ if self.configuration.san_ip == "":
+ msg = _LE("san ip must be configured!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # The san_login must always be set
+ if self.configuration.san_login == "":
+ msg = _LE("san_login must be configured!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # The san_password must always be set
+ if self.configuration.san_password == "":
+ msg = _LE("san_password must be configured!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ return
+
+ def _get_version(self):
+ """Return driver version."""
+ return self.VERSION
+
+ def _send_query(self):
+ """Do initial query to populate ISE global id."""
+ body = ''
+ url = '/query'
+ resp = self._connect('GET', url, body)
+ status = resp['status']
+ if status != 200:
+ # unsuccessful - this is fatal as we need the global id
+ # to build REST requests.
+ msg = _LE("Array query failed - No response (%d)!") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+        # Successfully fetched QUERY info. Parse out globalid along with
+        # the ipaddress for Controller 1 and Controller 2. We assign the
+        # primary ipaddress to use based on controller rank.
+ xml_tree = etree.fromstring(resp['content'])
+ # first check that the ISE is running a supported FW version
+ support = {}
+ support['thin'] = False
+ support['clones'] = False
+ support['thin-clones'] = False
+ self.configuration.ise_affinity = False
+ self.configuration.ise_qos = False
+ capabilities = xml_tree.find('capabilities')
+ if capabilities is None:
+ msg = _LE("Array query failed. No capabilities in response!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ for node in capabilities:
+ if node.tag != 'capability':
+ continue
+ capability = node
+ if capability.attrib['value'] == '49003':
+ self.configuration.ise_affinity = True
+ elif capability.attrib['value'] == '49004':
+ self.configuration.ise_qos = True
+ elif capability.attrib['value'] == '49005':
+ support['thin'] = True
+ elif capability.attrib['value'] == '49006':
+ support['clones'] = True
+ elif capability.attrib['value'] == '49007':
+ support['thin-clones'] = True
+        # Make sure the ISE supports the necessary features.
+ if not support['clones']:
+ msg = _LE("ISE FW version is not compatible with Openstack!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # set up thin provisioning support
+ self.configuration.san_thin_provision = support['thin-clones']
+ # Fill in global id, primary and secondary ip addresses
+ globalid = xml_tree.find('globalid')
+ if globalid is None:
+ msg = _LE("Array query failed. No global id in XML response!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ self.ise_globalid = globalid.text
+ controllers = xml_tree.find('controllers')
+ if controllers is None:
+ msg = _LE("Array query failed. No controllers in response!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ for node in controllers:
+ if node.tag != 'controller':
+ continue
+ # found a controller node
+ controller = node
+ ipaddress = controller.find('ipaddress')
+ ranktag = controller.find('rank')
+ if ipaddress is None:
+ continue
+ # found an ipaddress tag
+ # make sure rank tag is present
+ if ranktag is None:
+ continue
+ rank = ranktag.attrib['value']
+ # make sure rank value is present
+ if rank is None:
+ continue
+ if rank == '1':
+ # rank 1 means primary (xo)
+ self.ise_primary_ip = ipaddress.text
+ LOG.debug('Setting primary IP to: %s.',
+ self.ise_primary_ip)
+ elif rank == '0':
+ # rank 0 means secondary (nxo)
+ self.ise_secondary_ip = ipaddress.text
+ LOG.debug('Setting secondary IP to: %s.',
+ self.ise_secondary_ip)
+ # clear out new query request flag on successful fetch of QUERY info.
+ self.newquery = 0
+ return support
+
+ def _get_ise_globalid(self):
+ """Return ISE globalid."""
+ if self.ise_globalid is None or self.newquery == 1:
+ # this call will populate globalid
+ self._send_query()
+ if self.ise_globalid is None:
+ msg = _LE("ISE globalid not set!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ return self.ise_globalid
+
+ def _get_ise_primary_ip(self):
+ """Return Primary IP address to REST API."""
+ if self.ise_primary_ip == '':
+ # Primary IP is set to ISE IP passed in from cinder.conf
+ self.ise_primary_ip = self.configuration.san_ip
+ if self.ise_primary_ip == '':
+ # No IP - fatal.
+ msg = _LE("Primary IP must be set!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ return self.ise_primary_ip
+
+ def _get_ise_secondary_ip(self):
+ """Return Secondary IP address to REST API."""
+ if self.ise_secondary_ip != '':
+ return self.ise_secondary_ip
+
+ def _get_uri_prefix(self):
+        """Return the prefix in the form http(s)://<ip address>."""
+ prefix = ''
+ # figure out if http or https should be used
+ if self.configuration.driver_use_ssl:
+ prefix = 'https://'
+ else:
+ prefix = 'http://'
+ # add the IP address
+ prefix += self._get_ise_primary_ip()
+ return prefix
+
+ def _opener(self, method, url, body, header):
+ """Wrapper to handle connection"""
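+        # Returns a dict with 'status' (HTTP status code, 0 when the
+        # connection itself fails), 'content' (response body) and
+        # 'location' (Content-Location header, when present).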
+ response = {}
+ response['status'] = 0
+ response['content'] = ''
+ response['location'] = ''
+ # send the request
+ req = urllib2.Request(url, body, header)
+ # Override method to allow GET, PUT, POST, DELETE
+ req.get_method = lambda: method
+ try:
+ resp = urllib2.urlopen(req)
+ except urllib2.HTTPError as err:
+ # HTTP error. Return HTTP status and content and let caller
+ # handle retries.
+ response['status'] = err.code
+ response['content'] = err.read()
+ except urllib2.URLError as err:
+ # Connection failure. Return a status of 0 to indicate error.
+ response['status'] = 0
+ else:
+ # Successful. Return status code, content,
+ # and location header, if present.
+ response['status'] = resp.getcode()
+ response['content'] = resp.read()
+ response['location'] = \
+ resp.info().getheader('Content-Location', '')
+ return response
+
+ def _help_call_method(self, args, retry_count):
+ """Helper function used for prepare clone and delete REST calls."""
+ # This function calls request method and URL and checks the response.
+        # Certain cases allow for retries, while success and fatal status
+        # will fall out and tell the parent to break out of the loop.
+        # Initialize remaining retries.
+ remaining = retry_count
+ resp = self._send_cmd(args['method'], args['url'], args['arglist'])
+ status = resp['status']
+ if (status == 400):
+ reason = ''
+ if 'content' in resp:
+ reason = etree.fromstring(resp['content'])
+ if reason is not None:
+ reason = string.upper(reason.text)
+ if INVALID_STATUS in reason:
+ # Request failed with an invalid state. This can be because
+ # source volume is in a temporary unavailable state.
+ LOG.debug('REST call failed with invalid state: '
+ '%(method)s - %(status)d - %(reason)s',
+ {'method': args['method'],
+ 'status': status, 'reason': reason})
+ # Let parent check retry eligibility based on remaining retries
+ remaining -= 1
+ else:
+ # Fatal error. Set remaining to 0 to make caller exit loop.
+ remaining = 0
+ else:
+ # set remaining to 0 to make caller exit loop
+ # original waiter will handle the difference between success and
+ # fatal error based on resp['status'].
+ remaining = 0
+ return (remaining, resp)
+
+ def _help_call_opener(self, args, retry_count):
+ """Helper function to call _opener."""
+ # This function calls _opener func and checks the response.
+ # If response is 0 it will decrement the remaining retry count.
+ # On successful connection it will set remaining to 0 to signal
+ # parent to break out of loop.
+ remaining = retry_count
+ response = self._opener(args['method'], args['url'],
+ args['body'], args['header'])
+ if response['status'] != 0:
+ # We are done
+ remaining = 0
+ else:
+ # Let parent check retry eligibility based on remaining retries.
+ remaining -= 1
+ # Return remaining and response
+ return (remaining, response)
+
+ def _help_wait_for_status(self, args, retry_count):
+ """Helper function to wait for specified volume status"""
+ # This function calls _get_volume_info and checks the response.
+ # If the status strings do not match the specified status it will
+ # return the remaining retry count decremented by one.
+ # On successful match it will set remaining to 0 to signal
+ # parent to break out of loop.
+ remaining = retry_count
+ info = self._get_volume_info(args['name'])
+ status = args['status_string']
+ if (status in info['string'] or status in info['details']):
+ remaining = 0
+ else:
+ # Let parent check retry eligibility based on remaining retries.
+ remaining -= 1
+ # return remaining and volume info
+ return (remaining, info)
+
+ def _wait_for_completion(self, help_func, args, retry_count):
+ """Helper function to wait for completion of passed function"""
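+        # Typical call, as used by _connect() below:
+        #   self._wait_for_completion(self._help_call_opener, args, retries)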
+ # Helper call loop function.
+ def _call_loop(loop_args):
+ remaining = loop_args['retries']
+ args = loop_args['args']
+ LOG.debug("In call loop (%d) %s", remaining, args)
+ (remaining, response) = loop_args['func'](args, remaining)
+ if remaining == 0:
+ # We are done - let our caller handle response
+ raise loopingcall.LoopingCallDone(response)
+ args['retries'] = remaining
+
+ # Setup retries, interval and call wait function.
+ loop_args = {}
+ loop_args['retries'] = retry_count
+ loop_args['func'] = help_func
+ loop_args['args'] = args
+ interval = self.configuration.ise_retry_interval
+ timer = loopingcall.FixedIntervalLoopingCall(_call_loop, loop_args)
+ return timer.start(interval).wait()
+
+ def _connect(self, method, uri, body=''):
+        """Set up URL and headers and call _opener to make the request."""
+ url = ''
+ # see if we need to add prefix
+ # this call will force primary ip to be filled in as well
+ prefix = self._get_uri_prefix()
+ if prefix not in uri:
+ url = prefix
+ url += uri
+ # set up headers for XML and Auth
+ header = {'Content-Type': 'application/xml; charset=utf-8'}
+ auth_key =\
+ base64.encodestring('%s:%s' %
+ (self.configuration.san_login,
+ self.configuration.san_password))[:-1]
+ header['Authorization'] = 'Basic %s' % auth_key
+ # We allow 5 retries on each IP address. If connection to primary
+ # fails, secondary will be tried. If connection to secondary is
+ # successful, the request flag for a new QUERY will be set. The QUERY
+ # will be sent on next connection attempt to figure out which
+ # controller is primary in case it has changed.
+ LOG.debug("Connect: %s %s %s", method, url, body)
+ using_secondary = 0
+ response = {}
+ response['status'] = 0
+ response['location'] = ''
+ response['content'] = ''
+ primary_ip = self._get_ise_primary_ip()
+ secondary_ip = self._get_ise_secondary_ip()
+ # This will first try connecting to primary IP and then secondary IP.
+ args = {}
+ args['method'] = method
+ args['url'] = url
+ args['body'] = body
+ args['header'] = header
+ retries = self.configuration.ise_connection_retries
+ while True:
+ response = self._wait_for_completion(self._help_call_opener,
+ args, retries)
+ if response['status'] != 0:
+ # Connection succeeded. Request new query on next connection
+ # attempt if we used secondary ip to sort out who should be
+ # primary going forward
+ self.newquery = using_secondary
+ return response
+ # connection failed - check if we have any retries left
+ if using_secondary == 0:
+ # connection on primary ip failed
+ # try secondary ip
+                if not secondary_ip:
+                    # No secondary IP is configured, so the connection
+                    # attempt on the primary (and only) IP has failed.
+ msg = (_LE("Connection to %s failed and no secondary!") %
+ primary_ip)
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # swap primary for secondary ip in URL
+ url = string.replace(url, primary_ip, secondary_ip)
+ LOG.debug('Trying secondary IP URL: %s', url)
+ using_secondary = 1
+ continue
+ # connection failed on both IPs - break out of the loop
+ break
+ # connection on primary and secondary ip failed
+ msg = (_LE("Could not connect to %(primary)s or %(secondary)s!") %
+ {'primary': primary_ip, 'secondary': secondary_ip})
+ LOG.error(msg)
+ RaiseXIODriverException()
+
+ def _param_string(self, params):
+ """Turn (name, value) pairs into single param string"""
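+        # e.g. {'name': 'vol1', 'size': '10'} -> "name=vol1&size=10"
+        # (in dict iteration order; pairs with empty values are dropped).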
+ param_str = []
+ for name, value in params.items():
+ if value != '':
+ param_str.append("%s=%s" % (name, value))
+ return '&'.join(param_str)
+
+ def _send_cmd(self, method, url, params):
+ """Prepare HTTP request and call _connect"""
+ # Add params to appropriate field based on method
+ body = ''
+ if method == 'GET':
+ if params != {}:
+ url += '?' + self._param_string(params)
+ body = ''
+ elif method == 'POST':
+ body = self._param_string(params)
+ elif method == 'DELETE':
+ body = ''
+ elif method == 'PUT':
+ if params != {}:
+ url += '?' + self._param_string(params)
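+        # For example, _send_cmd('GET', '/storage/arrays/<gid>/volumes',
+        #                        {'name': 'volume1'})
+        # issues GET <prefix>/storage/arrays/<gid>/volumes?name=volume1.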
+ # ISE REST API is mostly synchronous but has some asynchronous
+ # streaks. Add retries to work around design of ISE REST API that
+ # does not allow certain operations to be in process concurrently.
+ # This is only an issue if lots of CREATE/DELETE/SNAPSHOT/CLONE ops
+ # are issued in short order.
+ return self._connect(method, url, body)
+
+ def find_target_chap(self):
+ """Return target CHAP settings"""
+ chap = {}
+ chap['chap_user'] = ''
+ chap['chap_passwd'] = ''
+ url = '/storage/arrays/%s/ionetworks' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, {})
+ status = resp['status']
+ if status != 200:
+ msg = _LW("IOnetworks GET failed (%d)") % status
+ LOG.warning(msg)
+ return chap
+ # Got a good response. Parse out CHAP info. First check if CHAP is
+ # enabled and if so parse out username and password.
+ root = etree.fromstring(resp['content'])
+ for element in root.iter():
+ if element.tag != 'chap':
+ continue
+ chapin = element.find('chapin')
+ if chapin is None:
+ continue
+ if chapin.attrib['value'] != '1':
+ continue
+ # CHAP is enabled. Store username / pw
+ chap_user = chapin.find('username')
+ if chap_user is not None:
+ chap['chap_user'] = chap_user.text
+ chap_passwd = chapin.find('password')
+ if chap_passwd is not None:
+ chap['chap_passwd'] = chap_passwd.text
+ break
+ return chap
+
+ def find_target_iqn(self, iscsi_ip):
+ """Find Target IQN string"""
+ url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, {})
+ status = resp['status']
+ if status != 200:
+ # Not good. Throw an exception.
+ msg = _LE("Controller GET failed (%d)") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Parse out IQN that matches iscsi_ip_address
+ # passed in from cinder.conf. IQN is 'hidden' in globalid field.
+ root = etree.fromstring(resp['content'])
+ for element in root.iter():
+ if element.tag != 'ioport':
+ continue
+ ipaddrs = element.find('ipaddresses')
+ if ipaddrs is None:
+ continue
+ for ipaddr in ipaddrs.iter():
+ # Look for match with iscsi_ip_address
+ if ipaddr is None or ipaddr.text != iscsi_ip:
+ continue
+ endpoint = element.find('endpoint')
+ if endpoint is None:
+ continue
+ global_id = endpoint.find('globalid')
+ if global_id is None:
+ continue
+ target_iqn = global_id.text
+ if target_iqn != '':
+ return target_iqn
+ # Did not find a matching IQN. Upsetting.
+ msg = _LE("Failed to get IQN!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+
+ def find_target_wwns(self):
+ """Return target WWN"""
+ # Let's look for WWNs
+ target_wwns = []
+ target = ''
+ url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, {})
+ status = resp['status']
+ if status != 200:
+ # Not good. Throw an exception.
+ msg = _LE("Controller GET failed (%d)") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Parse out globalid (WWN) of endpoint that matches
+ # protocol and type (array).
+ controllers = etree.fromstring(resp['content'])
+ for controller in controllers.iter():
+ if controller.tag != 'controller':
+ continue
+ fcports = controller.find('fcports')
+ if fcports is None:
+ continue
+ for fcport in fcports:
+ if fcport.tag != 'fcport':
+ continue
+ wwn_tag = fcport.find('wwn')
+ if wwn_tag is None:
+ continue
+ target = wwn_tag.text
+ target_wwns.append(target)
+ return target_wwns
+
+ def _find_target_lun(self, location):
+ """Return LUN for allocation specified in location string"""
+ resp = self._send_cmd('GET', location, {})
+ status = resp['status']
+ if status != 200:
+ # Not good. Throw an exception.
+ msg = _LE("Failed to get allocation information (%d)!") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Parse out LUN.
+ xml_tree = etree.fromstring(resp['content'])
+ allocation = xml_tree.find('allocation')
+ if allocation is not None:
+ luntag = allocation.find('lun')
+ if luntag is not None:
+ return luntag.text
+ # Did not find LUN. Throw an exception.
+ msg = _LE("Failed to get LUN information!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+
+ def _get_volume_info(self, vol_name):
+ """Return status of ISE volume"""
+ vol_info = {}
+ vol_info['value'] = ''
+ vol_info['string'] = ''
+ vol_info['details'] = ''
+ vol_info['location'] = ''
+ vol_info['size'] = ''
+ # Attempt to collect status value, string and details. Also pick up
+ # location string from response. Location is used in REST calls
+ # DELETE/SNAPSHOT/CLONE.
+ # We ask for a specific volume, so the response should contain only
+ # one volume entry.
+ url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, {'name': vol_name})
+ if resp['status'] != 200:
+ msg = (_LW("Could not get status for %(name)s (%(status)d).") %
+ {'name': vol_name, 'status': resp['status']})
+ LOG.warning(msg)
+ return vol_info
+ # Good response. Parse down to Volume tag in list of one.
+ root = etree.fromstring(resp['content'])
+ volume_node = root.find('volume')
+ if volume_node is None:
+ msg = _LW("No volume node in XML content.")
+ LOG.warning(msg)
+ return vol_info
+ # Location can be found as an attribute in the volume node tag.
+ vol_info['location'] = volume_node.attrib['self']
+ # Find status tag
+ status = volume_node.find('status')
+ if status is None:
+ msg = _LW("No status payload for volume %s.") % vol_name
+ LOG.warning(msg)
+ return vol_info
+ # Fill in value and string from status tag attributes.
+ vol_info['value'] = status.attrib['value']
+ vol_info['string'] = string.upper(status.attrib['string'])
+ # Detailed status has its own list of tags.
+ details = status.find('details')
+ if details is not None:
+ detail = details.find('detail')
+ if detail is not None:
+ vol_info['details'] = string.upper(detail.text)
+ # Get volume size
+ size_tag = volume_node.find('size')
+ if size_tag is not None:
+ vol_info['size'] = size_tag.text
+ # Return value, string, details and location.
+ return vol_info
+
+ def _alloc_location(self, volume, hostname, delete=0):
+ """Find location string for allocation. Also delete alloc per reqst"""
+ location = ''
+ url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, {'name': volume['name'],
+ 'hostname': hostname})
+ if resp['status'] != 200:
+ msg = (_LE("Could not GET allocation information (%d)!") %
+ resp['status'])
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Find the allocation based on volume name.
+ allocation_tree = etree.fromstring(resp['content'])
+ for allocation in allocation_tree.iter():
+ if allocation.tag != 'allocation':
+ continue
+ # verify volume name match
+ volume_tag = allocation.find('volume')
+ if volume_tag is None:
+ continue
+ volumename_tag = volume_tag.find('volumename')
+ if volumename_tag is None:
+ continue
+ volumename = volumename_tag.text
+ if volumename != volume['name']:
+ continue
+ # verified volume name match
+ # find endpoints list
+ endpoints = allocation.find('endpoints')
+ if endpoints is None:
+ continue
+ # Found endpoints list. Match on hostname if one was specified;
+ # otherwise any host qualifies. This is used by the caller to
+ # delete all allocations (presentations) to a volume.
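+ # When delete is requested the allocation is removed and the scan
+ # continues, so every matching allocation is cleaned up; otherwise
+ # the first match's location string is returned immediately.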
+ for endpoint in endpoints.iter():
+ if hostname != '':
+ hname_tag = endpoint.find('hostname')
+ if hname_tag is None:
+ continue
+ if string.upper(hname_tag.text) != string.upper(hostname):
+ continue
+ # Found hostname match. Location string is an attribute in
+ # allocation tag.
+ location = allocation.attrib['self']
+ # Delete allocation if requested.
+ if delete == 1:
+ self._send_cmd('DELETE', location, {})
+ location = ''
+ break
+ else:
+ return location
+ return location
+
+ def _present_volume(self, volume, hostname, lun):
+ """Present volume to host at specified LUN"""
+ # Set up params with volume name, host name and target lun, if
+ # specified.
+ target_lun = lun
+ params = {}
+ params = {'volumename': volume['name'],
+ 'hostname': hostname}
+ # Fill in LUN if specified.
+ if target_lun != '':
+ params['lun'] = target_lun
+ # Issue POST call to allocation.
+ url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
+ resp = self._send_cmd('POST', url, params)
+ status = resp['status']
+ if status == 201:
+ LOG.info(_LI("Volume %s presented."), volume['name'])
+ elif status == 409:
+ msg = (_LW("Volume %(name)s already presented (%(status)d)!") %
+ {'name': volume['name'], 'status': status})
+ LOG.warning(msg)
+ else:
+ msg = (_LE("Failed to present volume %(name)s (%(status)d)!") %
+ {'name': volume['name'], 'status': status})
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Fetch LUN. In theory the LUN should be what caller requested.
+ # We try a shortcut: the allocation location comes back in the
+ # Location header. If that shortcut did not work, ask for the
+ # location explicitly.
+ location = resp['location']
+ if location == '':
+ location = self._alloc_location(volume, hostname)
+ # Find target LUN
+ if location != '':
+ target_lun = self._find_target_lun(location)
+ # Success. Return target LUN.
+ LOG.debug("Volume %s presented: %s %s",
+ volume['name'], hostname, target_lun)
+ return target_lun
+
+ def find_allocations(self, hostname):
+ """Find allocations for specified host"""
+ alloc_cnt = 0
+ url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, {'hostname': hostname})
+ status = resp['status']
+ if status != 200:
+ msg = (_LE("Failed to get allocation information: "
+ "%(host)s (%(status)d)!") %
+ {'host': hostname, 'status': status})
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Count the number of allocations.
+ allocation_tree = etree.fromstring(resp['content'])
+ for allocation in allocation_tree.iter():
+ if allocation.tag != 'allocation':
+ continue
+ alloc_cnt += 1
+ return alloc_cnt
+
+ def _find_host(self, endpoints):
+ """Check if host entry exists on ISE based on endpoint (IQN, WWNs)"""
+ # An FC host might have more than one endpoint; iSCSI has only one.
+ # If endpoints is a list, use its first entry for the host search.
+ if type(endpoints) is list:
+ for endpoint in endpoints:
+ ep = endpoint
+ break
+ else:
+ ep = endpoints
+ # Got single end point. Now make REST API call to fetch all hosts
+ LOG.debug("find_host: Looking for host %s.", ep)
+ host = {}
+ host['name'] = ''
+ host['type'] = ''
+ host['locator'] = ''
+ params = {}
+ url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid())
+ resp = self._send_cmd('GET', url, params)
+ status = resp['status']
+ if resp['status'] != 200:
+ msg = _LE("Could not find any hosts (%s)") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Try to match up a host based on end point string.
+ host_tree = etree.fromstring(resp['content'])
+ for host_node in host_tree.iter():
+ if host_node.tag != 'host':
+ continue
+ # Found a host tag. Check if end point matches.
+ endpoints_node = host_node.find('endpoints')
+ if endpoints_node is None:
+ continue
+ for endpoint_node in endpoints_node.iter():
+ if endpoint_node.tag != 'endpoint':
+ continue
+ gid = endpoint_node.find('globalid')
+ if gid is None:
+ continue
+ if string.upper(gid.text) != string.upper(ep):
+ continue
+ # We have a match. Fill in host name, type and locator
+ host['locator'] = host_node.attrib['self']
+ type_tag = host_node.find('type')
+ if type_tag is not None:
+ host['type'] = type_tag.text
+ name_tag = host_node.find('name')
+ if name_tag is not None:
+ host['name'] = name_tag.text
+ break
+ # This will be filled in or '' based on findings above.
+ return host
+
+ def _create_host(self, hostname, endpoints):
+ """Create host entry on ISE for connector"""
+ # Create endpoint list for REST call.
+ endpoint_str = ''
+ if type(endpoints) is list:
+ ep_str = []
+ ec = 0
+ for endpoint in endpoints:
+ if ec == 0:
+ ep_str.append("%s" % (endpoint))
+ else:
+ ep_str.append("endpoint=%s" % (endpoint))
+ ec += 1
+ endpoint_str = '&'.join(ep_str)
+ else:
+ endpoint_str = endpoints
+ # Log host creation.
+ LOG.debug("Create host %s; %s", hostname, endpoint_str)
+ # Issue REST call to create host entry of Openstack type.
+ params = {}
+ params = {'name': hostname, 'endpoint': endpoint_str,
+ 'os': 'openstack'}
+ url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid())
+ resp = self._send_cmd('POST', url, params)
+ status = resp['status']
+ if status != 201 and status != 409:
+ msg = _LE("POST for host create failed (%s)!") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Successfully created host entry. Return host name.
+ return hostname
+
+ def _create_clone(self, volume, clone, clone_type):
+ """Create clone worker function"""
+ # This function is called for both snapshot and clone
+ # clone_type specifies what type is being processed
+ # Creating snapshots and clones is a two step process on current ISE
+ # FW. First snapshot/clone is prepared and then created.
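+ # Overall flow: wait for the source volume to be OPERATIONAL, POST a
+ # prepare request (expect 202), wait for the clone to reach PREPARED,
+ # PUT the commit (expect 201), then wait for OPERATIONAL.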
+ volume_name = ''
+ if clone_type == 'snapshot':
+ volume_name = volume['volume_name']
+ elif clone_type == 'clone':
+ volume_name = volume['name']
+ args = {}
+ # Make sure source volume is ready. This is another case where
+ # we have to work around asynchronous behavior in ISE REST API.
+ args['name'] = volume_name
+ args['status_string'] = OPERATIONAL_STATUS
+ retries = self.configuration.ise_completion_retries
+ vol_info = self._wait_for_completion(self._help_wait_for_status,
+ args, retries)
+ if vol_info['value'] == '0':
+ LOG.debug('Source volume %s ready.', volume_name)
+ else:
+ msg = _LE("Source volume %s not ready!") % volume_name
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Prepare snapshot
+ # get extra_specs and qos specs from source volume
+ # these functions fill in default values for entries used below
+ ctxt = context.get_admin_context()
+ type_id = volume['volume_type_id']
+ extra_specs = self._get_extra_specs(ctxt, type_id)
+ LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
+ qos = self._get_qos_specs(ctxt, type_id)
+ # Wait until snapshot/clone is prepared.
+ args['method'] = 'POST'
+ args['url'] = vol_info['location']
+ args['status'] = 202
+ args['arglist'] = {'name': clone['name'],
+ 'type': clone_type,
+ 'affinity': extra_specs['affinity'],
+ 'IOPSmin': qos['minIOPS'],
+ 'IOPSmax': qos['maxIOPS'],
+ 'IOPSburst': qos['burstIOPS']}
+ retries = self.configuration.ise_completion_retries
+ resp = self._wait_for_completion(self._help_call_method,
+ args, retries)
+ if resp['status'] != 202:
+ # clone prepare failed - bummer
+ msg = _LE("Prepare clone failed for %s.") % clone['name']
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # clone prepare request accepted
+ # make sure not to continue until clone prepared
+ args['name'] = clone['name']
+ args['status_string'] = PREPARED_STATUS
+ retries = self.configuration.ise_completion_retries
+ clone_info = self._wait_for_completion(self._help_wait_for_status,
+ args, retries)
+ if PREPARED_STATUS in clone_info['details']:
+ LOG.debug('Clone %s prepared.', clone['name'])
+ else:
+ msg = (_LE("Clone %s not in prepared state!") % clone['name'])
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Clone prepared, now commit the create
+ resp = self._send_cmd('PUT', clone_info['location'],
+ {clone_type: 'true'})
+ if resp['status'] != 201:
+ msg = (_LE("Commit clone failed: %(name)s (%(status)d)!") %
+ {'name': clone['name'], 'status': resp['status']})
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Clone create request accepted. Make sure not to return until clone
+ # operational.
+ args['name'] = clone['name']
+ args['status_string'] = OPERATIONAL_STATUS
+ retries = self.configuration.ise_completion_retries
+ clone_info = self._wait_for_completion(self._help_wait_for_status,
+ args, retries)
+ if OPERATIONAL_STATUS in clone_info['string']:
+ msg = _LI("Clone %s created."), clone['name']
+ LOG.info(msg)
+ else:
+ msg = _LE("Commit failed for %s!") % clone['name']
+ LOG.error(msg)
+ RaiseXIODriverException()
+ return
+
+ def _fill_in_available_capacity(self, node, pool):
+ """Fill in free capacity info for pool."""
+ available = node.find('available')
+ if available is None:
+ pool['free_capacity_gb'] = 0
+ return pool
+ pool['free_capacity_gb'] = int(available.get('total'))
+ # Fill in separate RAID level cap
+ byred = available.find('byredundancy')
+ if byred is None:
+ return pool
+ raid = byred.find('raid-0')
+ if raid is not None:
+ pool['free_capacity_gb_raid_0'] = int(raid.text)
+ raid = byred.find('raid-1')
+ if raid is not None:
+ pool['free_capacity_gb_raid_1'] = int(raid.text)
+ raid = byred.find('raid-5')
+ if raid is not None:
+ pool['free_capacity_gb_raid_5'] = int(raid.text)
+ raid = byred.find('raid-6')
+ if raid is not None:
+ pool['free_capacity_gb_raid_6'] = int(raid.text)
+ return pool
+
+ def _fill_in_used_capacity(self, node, pool):
+ """Fill in used capacity info for pool."""
+ used = node.find('used')
+ if used is None:
+ pool['allocated_capacity_gb'] = 0
+ return pool
+ pool['allocated_capacity_gb'] = int(used.get('total'))
+ # Fill in separate RAID level cap
+ byred = used.find('byredundancy')
+ if byred is None:
+ return pool
+ raid = byred.find('raid-0')
+ if raid is not None:
+ pool['allocated_capacity_gb_raid_0'] = int(raid.text)
+ raid = byred.find('raid-1')
+ if raid is not None:
+ pool['allocated_capacity_gb_raid_1'] = int(raid.text)
+ raid = byred.find('raid-5')
+ if raid is not None:
+ pool['allocated_capacity_gb_raid_5'] = int(raid.text)
+ raid = byred.find('raid-6')
+ if raid is not None:
+ pool['allocated_capacity_gb_raid_6'] = int(raid.text)
+ return pool
+
+ def _get_pools(self):
+ """Return information about all pools on ISE"""
+ pools = []
+ vol_cnt = 0
+ url = '/storage/pools'
+ resp = self._send_cmd('GET', url, {})
+ status = resp['status']
+ if status != 200:
+ # Request failed. Return what we have, which isn't much.
+ msg = _LW("Could not get pool information (%s)!") % status
+ LOG.warning(msg)
+ return (pools, vol_cnt)
+ # Parse out available (free) and used. Add them up to get total.
+ xml_tree = etree.fromstring(resp['content'])
+ for child in xml_tree:
+ if child.tag != 'pool':
+ continue
+ # Start from a fresh dict for each pool so the entries appended to
+ # 'pools' do not all reference the same object.
+ pool = {}
+ # Fill in ise pool name
+ tag = child.find('name')
+ if tag is not None:
+ pool['pool_ise_name'] = tag.text
+ # Fill in globalid
+ tag = child.find('globalid')
+ if tag is not None:
+ pool['globalid'] = tag.text
+ # Fill in pool name
+ tag = child.find('id')
+ if tag is not None:
+ pool['pool_name'] = tag.text
+ # Fill in pool status
+ tag = child.find('status')
+ if tag is not None:
+ pool['status'] = tag.attrib['string']
+ details = tag.find('details')
+ if details is not None:
+ detail = details.find('detail')
+ if detail is not None:
+ pool['status_details'] = detail.text
+ # Fill in available capacity
+ pool = self._fill_in_available_capacity(child, pool)
+ # Fill in allocated capacity
+ pool = self._fill_in_used_capacity(child, pool)
+ # Fill in media health and type
+ media = child.find('media')
+ if media is not None:
+ medium = media.find('medium')
+ if medium is not None:
+ health = medium.find('health')
+ if health is not None:
+ pool['health'] = int(health.text)
+ tier = medium.find('tier')
+ if tier is not None:
+ pool['media'] = tier.attrib['string']
+ cap = child.find('IOPSmincap')
+ if cap is not None:
+ pool['minIOPS_capacity'] = cap.text
+ cap = child.find('IOPSmaxcap')
+ if cap is not None:
+ pool['maxIOPS_capacity'] = cap.text
+ cap = child.find('IOPSburstcap')
+ if cap is not None:
+ pool['burstIOPS_capacity'] = cap.text
+ pool['total_capacity_gb'] = (int(pool['free_capacity_gb'] +
+ pool['allocated_capacity_gb']))
+ pool['QoS_support'] = self.configuration.ise_qos
+ pool['reserved_percentage'] = 0
+ pools.append(pool)
+ # count volumes
+ volumes = child.find('volumes')
+ if volumes is not None:
+ for volume in volumes:
+ vol_cnt += 1
+ return (pools, vol_cnt)
+
+ def _update_volume_stats(self):
+ """Update storage information"""
+ self._send_query()
+ data = {}
+ data["vendor_name"] = 'X-IO'
+ data["driver_version"] = self._get_version()
+ if self.configuration.volume_backend_name:
+ backend_name = self.configuration.volume_backend_name
+ else:
+ backend_name = self.__class__.__name__
+ data["volume_backend_name"] = backend_name
+ data['reserved_percentage'] = 0
+ # Get total and free capacity.
+ (pools, vol_cnt) = self._get_pools()
+ total_cap = 0
+ free_cap = 0
+ # fill in global capability support
+ # capacity
+ for pool in pools:
+ total_cap += int(pool['total_capacity_gb'])
+ free_cap += int(pool['free_capacity_gb'])
+ data['total_capacity_gb'] = int(total_cap)
+ data['free_capacity_gb'] = int(free_cap)
+ # QoS
+ data['QoS_support'] = self.configuration.ise_qos
+ # Volume affinity
+ data['affinity'] = self.configuration.ise_affinity
+ # Thin provisioning
+ data['thin'] = self.configuration.san_thin_provision
+ data['pools'] = pools
+ data['active_volumes'] = int(vol_cnt)
+ return data
+
+ def get_volume_stats(self, refresh=False):
+ """Get volume stats."""
+ if refresh:
+ self._vol_stats = self._update_volume_stats()
+ LOG.debug("ISE get_volume_stats (total, free): %s, %s",
+ self._vol_stats['total_capacity_gb'],
+ self._vol_stats['free_capacity_gb'])
+ return self._vol_stats
+
+ def _get_extra_specs(self, ctxt, type_id):
+ """Get extra specs from volume type."""
+ specs = {}
+ specs['affinity'] = ''
+ specs['alloctype'] = ''
+ specs['pool'] = self.configuration.ise_storage_pool
+ specs['raid'] = self.configuration.ise_raid
+ if type_id is not None:
+ volume_type = volume_types.get_volume_type(ctxt, type_id)
+ extra_specs = volume_type.get('extra_specs')
+ # Parse out RAID, pool and affinity values
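+ # Recognized keys (matched case-insensitively): Feature:Raid,
+ # Feature:Pool, Affinity:Type and Alloc:Type; the last two only take
+ # effect when the corresponding capability is enabled.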
+ for key, value in extra_specs.iteritems():
+ subkey = ''
+ if ':' in key:
+ fields = key.split(':')
+ key = fields[0]
+ subkey = fields[1]
+ if string.upper(key) == string.upper('Feature'):
+ if string.upper(subkey) == string.upper('Raid'):
+ specs['raid'] = value
+ elif string.upper(subkey) == string.upper('Pool'):
+ specs['pool'] = value
+ elif string.upper(key) == string.upper('Affinity'):
+ # Only fill this in if ISE FW supports volume affinity
+ if self.configuration.ise_affinity:
+ if string.upper(subkey) == string.upper('Type'):
+ specs['affinity'] = value
+ elif string.upper(key) == string.upper('Alloc'):
+ # Only fill this in if ISE FW supports thin provisioning
+ if self.configuration.san_thin_provision:
+ if string.upper(subkey) == string.upper('Type'):
+ specs['alloctype'] = value
+ return specs
+
+ def _get_qos_specs(self, ctxt, type_id):
+ """Get QoS specs from volume type."""
+ specs = {}
+ specs['minIOPS'] = ''
+ specs['maxIOPS'] = ''
+ specs['burstIOPS'] = ''
+ if type_id is not None:
+ volume_type = volume_types.get_volume_type(ctxt, type_id)
+ qos_specs_id = volume_type.get('qos_specs_id')
+ if qos_specs_id is not None:
+ kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
+ else:
+ kvs = volume_type.get('extra_specs')
+ # Parse out min, max and burst values
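+ # Keys may be scoped (e.g. 'qos:minIOPS'); only the portion after
+ # the colon is compared below.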
+ for key, value in kvs.iteritems():
+ if ':' in key:
+ fields = key.split(':')
+ key = fields[1]
+ if string.upper(key) == string.upper('minIOPS'):
+ specs['minIOPS'] = value
+ elif string.upper(key) == string.upper('maxIOPS'):
+ specs['maxIOPS'] = value
+ elif string.upper(key) == string.upper('burstIOPS'):
+ specs['burstIOPS'] = value
+ return specs
+
+ def create_volume(self, volume):
+ """Create requested volume"""
+ LOG.debug("X-IO create_volume called.")
+ # get extra_specs and qos based on volume type
+ # these functions fill in default values for entries used below
+ ctxt = context.get_admin_context()
+ type_id = volume['volume_type_id']
+ extra_specs = self._get_extra_specs(ctxt, type_id)
+ LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
+ qos = self._get_qos_specs(ctxt, type_id)
+ # Make create call
+ url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
+ resp = self._send_cmd('POST', url,
+ {'name': volume['name'],
+ 'size': volume['size'],
+ 'pool': extra_specs['pool'],
+ 'redundancy': extra_specs['raid'],
+ 'affinity': extra_specs['affinity'],
+ 'alloctype': extra_specs['alloctype'],
+ 'IOPSmin': qos['minIOPS'],
+ 'IOPSmax': qos['maxIOPS'],
+ 'IOPSburst': qos['burstIOPS']})
+ if resp['status'] != 201:
+ msg = (_LE("Failed to create volume: %(name)s (%(status)s)") %
+ {'name': volume['name'], 'status': resp['status']})
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Good response. Make sure volume is in operational state before
+ # returning. Volume creation completes asynchronously.
+ args = {}
+ args['name'] = volume['name']
+ args['status_string'] = OPERATIONAL_STATUS
+ retries = self.configuration.ise_completion_retries
+ vol_info = self._wait_for_completion(self._help_wait_for_status,
+ args, retries)
+ if OPERATIONAL_STATUS in vol_info['string']:
+ # Ready.
+ msg = _LI("Volume %s created"), volume['name']
+ LOG.info(msg)
+ else:
+ msg = _LE("Failed to create volume %s.") % volume['name']
+ LOG.error(msg)
+ RaiseXIODriverException()
+ return
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Create clone"""
+ LOG.debug("X-IO create_cloned_volume called.")
+ self._create_clone(src_vref, volume, 'clone')
+
+ def create_snapshot(self, snapshot):
+ """Create snapshot"""
+ LOG.debug("X-IO create_snapshot called.")
+ # Creating a snapshot uses the same interface as the clone operation
+ # on the ISE. The clone type ('snapshot' or 'clone') tells the ISE
+ # which operation is requested.
+ self._create_clone(snapshot, snapshot, 'snapshot')
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Create volume from snapshot"""
+ LOG.debug("X-IO create_volume_from_snapshot called.")
+ # ISE snapshots are just like a volume so this is a clone operation.
+ self._create_clone(snapshot, volume, 'clone')
+
+ def _delete_volume(self, volume):
+ """Delete specified volume"""
+ LOG.debug("X-IO delete_volume called.")
+ # First unpresent volume from all hosts.
+ self._alloc_location(volume, '', 1)
+ # Get volume status. Location string for volume comes back
+ # in response. Used for DELETE call below.
+ vol_info = self._get_volume_info(volume['name'])
+ if vol_info['location'] == '':
+ msg = _LW("Delete volume: %s not found!") % volume['name']
+ LOG.warning(msg)
+ return
+ # Make DELETE call.
+ args = {}
+ args['method'] = 'DELETE'
+ args['url'] = vol_info['location']
+ args['arglist'] = {}
+ args['status'] = 204
+ retries = self.configuration.ise_completion_retries
+ resp = self._wait_for_completion(self._help_call_method, args, retries)
+ if resp['status'] == 204:
+ msg = (_LI("Volume %s deleted."), volume['name'])
+ LOG.info(msg)
+ return
+
+ def delete_volume(self, volume):
+ """Delete specified volume"""
+ LOG.debug("X-IO delete_volume called.")
+ self._delete_volume(volume)
+
+ def delete_snapshot(self, snapshot):
+ """Delete snapshot"""
+ LOG.debug("X-IO delete_snapshot called.")
+ # Deleting a snapshot and deleting a volume are identical operations
+ # on the ISE.
+ self._delete_volume(snapshot)
+
+ def _modify_volume(self, volume, new_attributes):
+ # Get volume status. Location string for volume comes back
+ # in response. Used for PUT call below.
+ vol_info = self._get_volume_info(volume['name'])
+ if vol_info['location'] == '':
+ msg = _LE("modify volume: %s does not exist!") % volume['name']
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # Make modify volume REST call using PUT.
+ # Location from above is used as identifier.
+ resp = self._send_cmd('PUT', vol_info['location'], new_attributes)
+ status = resp['status']
+ if status == 201:
+ LOG.debug("Volume %s modified.", volume['name'])
+ return True
+ msg = (_LE("Modify volume PUT failed: %(name)s (%(status)d).") %
+ {'name': volume['name'], 'status': status})
+ LOG.error(msg)
+ RaiseXIODriverException()
+
+ def extend_volume(self, volume, new_size):
+ """Extend volume to new size."""
+ LOG.debug("extend_volume called")
+ ret = self._modify_volume(volume, {'size': new_size})
+ if ret is True:
+ msg = (_LI("volume %(name)s extended to %(size)d."),
+ {'name': volume['name'], 'size': new_size})
+ LOG.info(msg)
+ return
+
+ def retype(self, ctxt, volume, new_type, diff, host):
+ """Convert the volume to be of the new type."""
+ LOG.debug("X-IO retype called")
+ qos = self._get_qos_specs(ctxt, new_type['id'])
+ ret = self._modify_volume(volume, {'IOPSmin': qos['minIOPS'],
+ 'IOPSmax': qos['maxIOPS'],
+ 'IOPSburst': qos['burstIOPS']})
+ if ret is True:
+ msg = _LI("Volume %s retyped."), volume['name']
+ LOG.info(msg)
+ return True
+
+ def manage_existing(self, volume, ise_volume_ref):
+ """Convert an existing ISE volume to a Cinder volume."""
+ LOG.debug("X-IO manage_existing called")
+ if 'source-name' not in ise_volume_ref:
+ msg = _LE("manage_existing: No source-name in ref!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # copy the source-name to 'name' for modify volume use
+ ise_volume_ref['name'] = ise_volume_ref['source-name']
+ ctxt = context.get_admin_context()
+ qos = self._get_qos_specs(ctxt, volume['volume_type_id'])
+ ret = self._modify_volume(ise_volume_ref,
+ {'name': volume['name'],
+ 'IOPSmin': qos['minIOPS'],
+ 'IOPSmax': qos['maxIOPS'],
+ 'IOPSburst': qos['burstIOPS']})
+ if ret is True:
+ msg = _LI("Volume %s converted."), ise_volume_ref['name']
+ LOG.info(msg)
+ return ret
+
+ def manage_existing_get_size(self, volume, ise_volume_ref):
+ """Get size of an existing ISE volume."""
+ LOG.debug("X-IO manage_existing_get_size called")
+ if 'source-name' not in ise_volume_ref:
+ msg = _LE("manage_existing_get_size: No source-name in ref!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ ref_name = ise_volume_ref['source-name']
+ # get volume status including size
+ vol_info = self._get_volume_info(ref_name)
+ if vol_info['location'] == '':
+ msg = (_LE("manage_existing_get_size: %s does not exist!") %
+ ref_name)
+ LOG.error(msg)
+ RaiseXIODriverException()
+ return int(vol_info['size'])
+
+ def unmanage(self, volume):
+ """Remove Cinder management from ISE volume"""
+ LOG.debug("X-IO unmanage called")
+ vol_info = self._get_volume_info(volume['name'])
+ if vol_info['location'] == '':
+ msg = _LE("unmanage: Volume %s does not exist!") % volume['name']
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # This is a noop. ISE does not store any Cinder specific information.
+
+ def ise_present(self, volume, hostname_in, endpoints):
+ """Set up presentation for volume and specified connector"""
+ LOG.debug("X-IO ise_present called.")
+ # Create host entry on ISE if necessary.
+ # Check to see if host entry already exists.
+ # Create if not found
+ host = self._find_host(endpoints)
+ if host['name'] == '':
+ # host not found, so create new host entry
+ # Use host name if filled in. If blank, ISE will make up a name.
+ self._create_host(hostname_in, endpoints)
+ host = self._find_host(endpoints)
+ if host['name'] == '':
+ # host still not found, this is fatal.
+ msg = _LE("Host could not be found!")
+ LOG.error(msg)
+ RaiseXIODriverException()
+ elif string.upper(host['type']) != 'OPENSTACK':
+ # Make sure host type is marked as Openstack host
+ params = {'os': 'openstack'}
+ resp = self._send_cmd('PUT', host['locator'], params)
+ status = resp['status']
+ if status != 201 and status != 409:
+ msg = _LE("Host PUT failed (%s).") % status
+ LOG.error(msg)
+ RaiseXIODriverException()
+ # We have a host object.
+ target_lun = ''
+ # Present volume to host.
+ target_lun = self._present_volume(volume, host['name'], target_lun)
+ # Fill in target information.
+ data = {}
+ data['target_lun'] = target_lun
+ data['volume_id'] = volume['id']
+ data['access_mode'] = 'rw'
+ return data
+
+ def ise_unpresent(self, volume, endpoints):
+ """Delete presentation between volume and connector"""
+ LOG.debug("X-IO ise_unpresent called.")
+ # Delete allocation uses host name. Go find it based on endpoints.
+ host = self._find_host(endpoints)
+ if host['name'] != '':
+ # Delete allocation based on hostname and volume.
+ self._alloc_location(volume, host['name'], 1)
+ return host['name']
+
+ def create_export(self, context, volume):
+ LOG.debug("X-IO create_export called.")
+
+ def ensure_export(self, context, volume):
+ LOG.debug("X-IO ensure_export called.")
+
+ def remove_export(self, context, volume):
+ LOG.debug("X-IO remove_export called.")
+
+ def local_path(self, volume):
+ LOG.debug("X-IO local_path called.")
+
+
+# Protocol-specific classes for entry. They are wrappers around the base
+# class above, and every external API call results in a call to a common
+# function in the base class.
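+# Illustrative cinder.conf backend section (option names other than
+# volume_driver come from XIO_OPTS and the shared san opts, so adjust
+# to the deployment):
+#   [ise_iscsi]
+#   volume_backend_name = ise_iscsi
+#   volume_driver = cinder.volume.drivers.xio.XIOISEISCSIDriver
+#   san_ip = <ISE management IP>
+#   iscsi_ip_address = <ISE iSCSI portal IP>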
+class XIOISEISCSIDriver(driver.ISCSIDriver):
+
+ """Requires ISE Running FW version 3.1.0 or higher"""
+
+ def __init__(self, *args, **kwargs):
+ super(XIOISEISCSIDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(XIO_OPTS)
+ self.configuration.append_config_values(san.san_opts)
+
+ # The iscsi_ip_address must always be set.
+ if self.configuration.iscsi_ip_address == '':
+ err_msg = _LE("iscsi_ip_address must be set!")
+ LOG.error(err_msg)
+ RaiseXIODriverException()
+ # Setup common driver
+ self.driver = XIOISEDriver(configuration=self.configuration)
+
+ def do_setup(self, context):
+ return self.driver.do_setup(context)
+
+ def check_for_setup_error(self):
+ return self.driver.check_for_setup_error()
+
+ def local_path(self, volume):
+ return self.driver.local_path(volume)
+
+ def get_volume_stats(self, refresh=False):
+ data = self.driver.get_volume_stats(refresh)
+ data["storage_protocol"] = 'iSCSI'
+ return data
+
+ def create_volume(self, volume):
+ self.driver.create_volume(volume)
+ # Volume created successfully. Fill in CHAP information.
+ model_update = {}
+ chap = {}
+ chap = self.driver.find_target_chap()
+ if chap['chap_user'] != '':
+ model_update['provider_auth'] = 'CHAP %s %s' % \
+ (chap['chap_user'], chap['chap_passwd'])
+ else:
+ model_update['provider_auth'] = ''
+ return model_update
+
+ def create_cloned_volume(self, volume, src_vref):
+ return self.driver.create_cloned_volume(volume, src_vref)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ return self.driver.create_volume_from_snapshot(volume, snapshot)
+
+ def delete_volume(self, volume):
+ return self.driver.delete_volume(volume)
+
+ def extend_volume(self, volume, new_size):
+ return self.driver.extend_volume(volume, new_size)
+
+ def retype(self, ctxt, volume, new_type, diff, host):
+ return self.driver.retype(ctxt, volume, new_type, diff, host)
+
+ def manage_existing(self, volume, ise_volume_ref):
+ ret = self.driver.manage_existing(volume, ise_volume_ref)
+ if ret is True:
+ # Volume converted successfully. Fill in CHAP information.
+ model_update = {}
+ chap = {}
+ chap = self.driver.find_target_chap()
+ if chap['chap_user'] != '':
+ model_update['provider_auth'] = 'CHAP %s %s' % \
+ (chap['chap_user'], chap['chap_passwd'])
+ else:
+ model_update['provider_auth'] = ''
+ return model_update
+
+ def manage_existing_get_size(self, volume, ise_volume_ref):
+ return self.driver.manage_existing_get_size(volume, ise_volume_ref)
+
+ def unmanage(self, volume):
+ return self.driver.unmanage(volume)
+
+ def initialize_connection(self, volume, connector):
+ hostname = ''
+ if 'host' in connector:
+ hostname = connector['host']
+ data = self.driver.ise_present(volume, hostname,
+ connector['initiator'])
+ # find IP for target
+ data['target_portal'] = \
+ '%s:3260' % (self.configuration.iscsi_ip_address)
+ # set IQN for target
+ data['target_discovered'] = False
+ data['target_iqn'] = \
+ self.driver.find_target_iqn(self.configuration.iscsi_ip_address)
+ # Fill in authentication method (CHAP)
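+ # provider_auth is stored as 'CHAP <username> <password>'
+ # (see create_volume above), so split it into its three fields.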
+ if 'provider_auth' in volume:
+ auth = volume['provider_auth']
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+ data['auth_method'] = auth_method
+ data['auth_username'] = auth_username
+ data['auth_password'] = auth_secret
+ return {'driver_volume_type': 'iscsi',
+ 'data': data}
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ return self.driver.ise_unpresent(volume, connector['initiator'])
+
+ def create_snapshot(self, snapshot):
+ return self.driver.create_snapshot(snapshot)
+
+ def delete_snapshot(self, snapshot):
+ return self.driver.delete_snapshot(snapshot)
+
+ def create_export(self, context, volume):
+ return self.driver.create_export(context, volume)
+
+ def ensure_export(self, context, volume):
+ return self.driver.ensure_export(context, volume)
+
+ def remove_export(self, context, volume):
+ return self.driver.remove_export(context, volume)
+
+
+class XIOISEFCDriver(driver.FibreChannelDriver):
+
+ """Requires ISE Running FW version 2.8.0 or higher"""
+
+ def __init__(self, *args, **kwargs):
+ super(XIOISEFCDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(XIO_OPTS)
+ self.configuration.append_config_values(san.san_opts)
+ self.driver = XIOISEDriver(configuration=self.configuration)
+
+ def do_setup(self, context):
+ return self.driver.do_setup(context)
+
+ def check_for_setup_error(self):
+ return self.driver.check_for_setup_error()
+
+ def local_path(self, volume):
+ return self.driver.local_path(volume)
+
+ def get_volume_stats(self, refresh=False):
+ data = self.driver.get_volume_stats(refresh)
+ data["storage_protocol"] = 'fibre_channel'
+ return data
+
+ def create_volume(self, volume):
+ return self.driver.create_volume(volume)
+
+ def create_cloned_volume(self, volume, src_vref):
+ return self.driver.create_cloned_volume(volume, src_vref)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ return self.driver.create_volume_from_snapshot(volume, snapshot)
+
+ def delete_volume(self, volume):
+ return self.driver.delete_volume(volume)
+
+ def extend_volume(self, volume, new_size):
+ return self.driver.extend_volume(volume, new_size)
+
+ def retype(self, ctxt, volume, new_type, diff, host):
+ return self.driver.retype(ctxt, volume, new_type, diff, host)
+
+ def manage_existing(self, volume, ise_volume_ref):
+ return self.driver.manage_existing(volume, ise_volume_ref)
+
+ def manage_existing_get_size(self, volume, ise_volume_ref):
+ return self.driver.manage_existing_get_size(volume, ise_volume_ref)
+
+ def unmanage(self, volume):
+ return self.driver.unmanage(volume)
+
+ @fczm_utils.AddFCZone
+ def initialize_connection(self, volume, connector):
+ hostname = ''
+ if 'host' in connector:
+ hostname = connector['host']
+ data = self.driver.ise_present(volume, hostname, connector['wwpns'])
+ data['target_discovered'] = True
+ # set wwns for target
+ target_wwns = self.driver.find_target_wwns()
+ data['target_wwn'] = target_wwns
+ # build target initiator map
+ target_map = {}
+ for initiator in connector['wwpns']:
+ target_map[initiator] = target_wwns
+ data['initiator_target_map'] = target_map
+ return {'driver_volume_type': 'fibre_channel',
+ 'data': data}
+
+ @fczm_utils.RemoveFCZone
+ def terminate_connection(self, volume, connector, **kwargs):
+ # now we are ready to tell ISE to delete presentations
+ hostname = self.driver.ise_unpresent(volume, connector['wwpns'])
+ # set target_wwn and initiator_target_map only if host
+ # has no more presentations
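+ # Returning target_wwn and the initiator_target_map only when no
+ # allocations remain lets the RemoveFCZone decorator tear down the
+ # zone on the last detach for this host.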
+ data = {}
+ alloc_cnt = 0
+ if hostname != '':
+ alloc_cnt = self.driver.find_allocations(hostname)
+ if alloc_cnt == 0:
+ target_wwns = self.driver.find_target_wwns()
+ data['target_wwn'] = target_wwns
+ # build target initiator map
+ target_map = {}
+ for initiator in connector['wwpns']:
+ target_map[initiator] = target_wwns
+ data['initiator_target_map'] = target_map
+ return {'driver_volume_type': 'fibre_channel',
+ 'data': data}
+
+ def create_snapshot(self, snapshot):
+ return self.driver.create_snapshot(snapshot)
+
+ def delete_snapshot(self, snapshot):
+ return self.driver.delete_snapshot(snapshot)
+
+ def create_export(self, context, volume):
+ return self.driver.create_export(context, volume)
+
+ def ensure_export(self, context, volume):
+ return self.driver.ensure_export(context, volume)
+
+ def remove_export(self, context, volume):
+ return self.driver.remove_export(context, volume)