From d856a6cf2efdd440134685d4796f5f5e0c2a8337 Mon Sep 17 00:00:00 2001 From: Richard Hedlind Date: Mon, 20 Jul 2015 14:41:12 +0000 Subject: [PATCH] Revert "Remove X-IO volume driver" This reverts commit 45e6482998478c953edd6973c2d8304efb910bdc. Change-Id: I9b53fae7ab7462e3f6f90fdf573f9324a26e0f7c --- cinder/exception.py | 5 + cinder/tests/unit/test_xio.py | 1459 ++++++++++++++++++++++++++++++ cinder/volume/drivers/xio.py | 1605 +++++++++++++++++++++++++++++++++ 3 files changed, 3069 insertions(+) create mode 100644 cinder/tests/unit/test_xio.py create mode 100644 cinder/volume/drivers/xio.py diff --git a/cinder/exception.py b/cinder/exception.py index 690e22dec..a8db1b040 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -884,6 +884,11 @@ class ISCSITargetHelperCommandFailed(CinderException): message = _("%(error_message)s") +# X-IO driver exception. +class XIODriverException(VolumeDriverException): + message = _("X-IO Volume Driver exception!") + + # Violin Memory drivers class ViolinInvalidBackendConfig(CinderException): message = _("Volume backend config is invalid: %(reason)s") diff --git a/cinder/tests/unit/test_xio.py b/cinder/tests/unit/test_xio.py new file mode 100644 index 000000000..955c6f8fc --- /dev/null +++ b/cinder/tests/unit/test_xio.py @@ -0,0 +1,1459 @@ +# Copyright (c) 2014 X-IO Technologies. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_log import log as logging + +from cinder import context +from cinder import exception +from cinder import test +from cinder.tests.unit import utils +from cinder.volume.drivers import xio +from cinder.volume import qos_specs +from cinder.volume import volume_types + +LOG = logging.getLogger("cinder.volume.driver") + +ISE_IP1 = '10.12.12.1' +ISE_IP2 = '10.11.12.2' +ISE_ISCSI_IP1 = '1.2.3.4' +ISE_ISCSI_IP2 = '1.2.3.5' + +ISE_GID = 'isegid' +ISE_IQN = ISE_GID +ISE_WWN1 = ISE_GID + '1' +ISE_WWN2 = ISE_GID + '2' +ISE_WWN3 = ISE_GID + '3' +ISE_WWN4 = ISE_GID + '4' +ISE_TARGETS = [ISE_WWN1, ISE_WWN2, ISE_WWN3, ISE_WWN4] +ISE_INIT_TARGET_MAP = {'init_wwn1': ISE_TARGETS, + 'init_wwn2': ISE_TARGETS} + +VOLUME_SIZE = 10 +NEW_VOLUME_SIZE = 20 + +VOLUME1 = {'id': '1', 'name': 'volume1', + 'size': VOLUME_SIZE, 'volume_type_id': 'type1'} + +VOLUME2 = {'id': '2', 'name': 'volume2', + 'size': VOLUME_SIZE, 'volume_type_id': 'type2', + 'provider_auth': 'CHAP abc abc'} + +VOLUME3 = {'id': '3', 'name': 'volume3', + 'size': VOLUME_SIZE, 'volume_type_id': None} + +SNAPSHOT1 = {'name': 'snapshot1', + 'volume_name': VOLUME1['name'], + 'volume_type_id': 'type3'} + +CLONE1 = {'id': '3', 'name': 'clone1', + 'size': VOLUME_SIZE, 'volume_type_id': 'type4'} + +HOST1 = 'host1' + +HOST2 = 'host2' + +ISCSI_CONN1 = {'initiator': 'init_iqn1', + 'host': HOST1} + +ISCSI_CONN2 = {'initiator': 'init_iqn2', + 'host': HOST2} + +FC_CONN1 = {'wwpns': ['init_wwn1', 'init_wwn2'], + 'host': HOST1} + +FC_CONN2 = {'wwpns': ['init_wwn3', 'init_wwn4'], + 'host': HOST2} + +ISE_HTTP_IP = 'http://' + ISE_IP1 + +ISE_HOST_LOCATION = '/storage/hosts/1' +ISE_HOST_LOCATION_URL = ISE_HTTP_IP + ISE_HOST_LOCATION + +ISE_VOLUME1_LOCATION = '/storage/volumes/volume1' +ISE_VOLUME1_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME1_LOCATION +ISE_VOLUME2_LOCATION = '/storage/volumes/volume2' +ISE_VOLUME2_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME2_LOCATION +ISE_VOLUME3_LOCATION = '/storage/volumes/volume3' 
+ISE_VOLUME3_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME3_LOCATION + +ISE_SNAPSHOT_LOCATION = '/storage/volumes/snapshot1' +ISE_SNAPSHOT_LOCATION_URL = ISE_HTTP_IP + ISE_SNAPSHOT_LOCATION + +ISE_CLONE_LOCATION = '/storage/volumes/clone1' +ISE_CLONE_LOCATION_URL = ISE_HTTP_IP + ISE_CLONE_LOCATION + +ISE_ALLOCATION_LOCATION = '/storage/allocations/a1' +ISE_ALLOCATION_LOCATION_URL = ISE_HTTP_IP + ISE_ALLOCATION_LOCATION + +ISE_GET_QUERY_XML =\ + """ + ABC12345 + + + + + + + + + + %s + + + + %s + + + + """ % (ISE_IP1, ISE_IP2) + +ISE_GET_QUERY_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_QUERY_XML.split())} + +ISE_GET_QUERY_NO_CAP_XML =\ + """ + ABC12345 + + + %s + + + + %s + + + + """ % (ISE_IP1, ISE_IP2) + +ISE_GET_QUERY_NO_CAP_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_QUERY_NO_CAP_XML.split())} + +ISE_GET_QUERY_NO_CTRL_XML =\ + """ + ABC12345 + + + + + + + + + + """ + +ISE_GET_QUERY_NO_CTRL_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_QUERY_NO_CTRL_XML.split())} + +ISE_GET_QUERY_NO_IP_XML =\ + """ + ABC12345 + + + + + + + + + + + + + + + + + + + + """ + +ISE_GET_QUERY_NO_IP_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_QUERY_NO_IP_XML.split())} + +ISE_GET_QUERY_NO_GID_XML =\ + """ + + + + + + + + + + %s + + + + %s + + + + """ % (ISE_IP1, ISE_IP2) + +ISE_GET_QUERY_NO_GID_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_QUERY_NO_GID_XML.split())} + +ISE_GET_QUERY_NO_CLONE_XML =\ + """ + ABC12345 + + + + + + + + + %s + + + + %s + + + + """ % (ISE_IP1, ISE_IP2) + +ISE_GET_QUERY_NO_CLONE_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_QUERY_NO_CLONE_XML.split())} + +ISE_GET_STORAGE_POOLS_XML =\ + """ + + + Pool 1 + 1 + +
+ None +
+
+ + + 60 + 30 + 45 + + + + + 0 + 40 + 0 + + + + + 100 + + + + + + volgid + + + volgid2 + + +
+
+ """ + +ISE_GET_STORAGE_POOLS_RESP =\ + {'status': 200, + 'location': 'Pool location', + 'content': " ".join(ISE_GET_STORAGE_POOLS_XML.split())} + +ISE_GET_VOL_STATUS_NO_VOL_NODE_XML =\ + """""" + +ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, + 'content': " ".join(ISE_GET_VOL_STATUS_NO_VOL_NODE_XML.split())} + +ISE_GET_VOL_STATUS_NO_STATUS_XML =\ + """ + + + """ % (ISE_VOLUME1_LOCATION_URL) + +ISE_GET_VOL_STATUS_NO_STATUS_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, + 'content': " ".join(ISE_GET_VOL_STATUS_NO_STATUS_XML.split())} + +ISE_GET_VOL1_STATUS_XML =\ + """ + + +
+ Prepared +
+
+ 10 +
+
""" % (ISE_VOLUME1_LOCATION_URL) + +ISE_GET_VOL1_STATUS_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, + 'content': " ".join(ISE_GET_VOL1_STATUS_XML.split())} + +ISE_GET_VOL2_STATUS_XML =\ + """ + + +
+ Prepared +
+
+
+
""" % (ISE_VOLUME2_LOCATION_URL) + +ISE_GET_VOL2_STATUS_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_VOLUME2_LOCATION_URL, + 'content': " ".join(ISE_GET_VOL2_STATUS_XML.split())} + +ISE_GET_VOL3_STATUS_XML =\ + """ + + +
+ Prepared +
+
+
+
""" % (ISE_VOLUME3_LOCATION_URL) + +ISE_GET_VOL3_STATUS_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_VOLUME3_LOCATION_URL, + 'content': " ".join(ISE_GET_VOL3_STATUS_XML.split())} + +ISE_GET_SNAP1_STATUS_XML =\ + """ + + +
+ Prepared +
+
+
+
""" % (ISE_SNAPSHOT_LOCATION_URL) + +ISE_GET_SNAP1_STATUS_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_SNAPSHOT_LOCATION_URL, + 'content': " ".join(ISE_GET_SNAP1_STATUS_XML.split())} + +ISE_GET_CLONE1_STATUS_XML =\ + """ + + +
+ Prepared +
+
+
+
""" % (ISE_CLONE_LOCATION_URL) + +ISE_GET_CLONE1_STATUS_RESP =\ + {'status': 200, + 'location': 'u%s' % ISE_CLONE_LOCATION_URL, + 'content': " ".join(ISE_GET_CLONE1_STATUS_XML.split())} + +ISE_CREATE_VOLUME_XML = """""" + +ISE_CREATE_VOLUME_RESP =\ + {'status': 201, + 'location': ISE_VOLUME1_LOCATION_URL, + 'content': " ".join(ISE_CREATE_VOLUME_XML.split())} + +ISE_GET_IONETWORKS_XML =\ + """ + + + + + + + + + """ + +ISE_GET_IONETWORKS_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_IONETWORKS_XML.split())} + +ISE_GET_IONETWORKS_CHAP_XML =\ + """ + + abc + abc + + + + + + """ + +ISE_GET_IONETWORKS_CHAP_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_IONETWORKS_CHAP_XML.split())} + +ISE_DELETE_VOLUME_XML = """""" + +ISE_DELETE_VOLUME_RESP =\ + {'status': 204, + 'location': '', + 'content': " ".join(ISE_DELETE_VOLUME_XML.split())} + +ISE_GET_ALLOC_WITH_EP_XML =\ + """ + + + %s + + + %s + + 1 + + """ %\ + (ISE_ALLOCATION_LOCATION_URL, VOLUME1['name'], HOST1) + +ISE_GET_ALLOC_WITH_EP_RESP =\ + {'status': 200, + 'location': ISE_ALLOCATION_LOCATION_URL, + 'content': " ".join(ISE_GET_ALLOC_WITH_EP_XML.split())} + +ISE_GET_ALLOC_WITH_NO_ALLOC_XML =\ + """""" % ISE_ALLOCATION_LOCATION_URL + +ISE_GET_ALLOC_WITH_NO_ALLOC_RESP =\ + {'status': 200, + 'location': ISE_ALLOCATION_LOCATION_URL, + 'content': " ".join(ISE_GET_ALLOC_WITH_NO_ALLOC_XML.split())} + +ISE_DELETE_ALLOC_XML = """""" + +ISE_DELETE_ALLOC_RESP =\ + {'status': 204, + 'location': '', + 'content': " ".join(ISE_DELETE_ALLOC_XML.split())} + +ISE_GET_HOSTS_NOHOST_XML =\ + """""" + +ISE_GET_HOSTS_NOHOST_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_HOSTS_NOHOST_XML.split())} + +ISE_GET_HOSTS_HOST1_XML =\ + """ + + "OPENSTACK" + %s + 1 + + + init_wwn1 + + + init_wwn2 + + + init_iqn1 + + + + """ % HOST1 + +ISE_GET_HOSTS_HOST1_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_HOSTS_HOST1_XML.split())} + 
+ISE_GET_HOSTS_HOST1_HOST_TYPE_XML =\ + """ + + "WINDOWS" + %s + 1 + + + init_wwn1 + + + init_wwn2 + + + init_iqn1 + + + + """ % HOST1 + +ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_HOSTS_HOST1_HOST_TYPE_XML.split())} + +ISE_GET_HOSTS_HOST2_XML =\ + """ + + %s + 2 + + + init_wwn3 + + + init_wwn4 + + + init_iqn2 + + + + """ % HOST2 + +ISE_GET_HOSTS_HOST2_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_HOSTS_HOST2_XML.split())} + +ISE_CREATE_HOST_XML =\ + """""" + +ISE_CREATE_HOST_RESP =\ + {'status': 201, + 'location': 'http://ip/storage/hosts/host1', + 'content': " ".join(ISE_CREATE_HOST_XML.split())} + +ISE_CREATE_ALLOC_XML =\ + """""" + +ISE_CREATE_ALLOC_RESP =\ + {'status': 201, + 'location': ISE_ALLOCATION_LOCATION_URL, + 'content': " ".join(ISE_CREATE_ALLOC_XML.split())} + +ISE_GET_ENDPOINTS_XML =\ + """ + + isegid + iSCSI + + ise1 + + + + + + a1 + + + + + + isegid + Fibre Channel + + ise1 + + + + + + a1 + + + + + """ % (ISE_ALLOCATION_LOCATION_URL, + ISE_ALLOCATION_LOCATION_URL) + +ISE_GET_ENDPOINTS_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_ENDPOINTS_XML.split())} + +ISE_GET_CONTROLLERS_XML =\ + """ + + + + + + %s + + + isegid + + + + + + %s + + + %s + + + + + + + + + %s + + + isegid + + + + + + %s + + + %s + + + + """ % (ISE_ISCSI_IP1, ISE_WWN1, ISE_WWN2, + ISE_ISCSI_IP2, ISE_WWN3, ISE_WWN4) + +ISE_GET_CONTROLLERS_RESP =\ + {'status': 200, + 'location': '', + 'content': " ".join(ISE_GET_CONTROLLERS_XML.split())} + +ISE_CREATE_SNAPSHOT_XML = """""" + +ISE_CREATE_SNAPSHOT_RESP =\ + {'status': 201, + 'location': ISE_SNAPSHOT_LOCATION_URL, + 'content': " ".join(ISE_CREATE_SNAPSHOT_XML.split())} + +ISE_PREP_SNAPSHOT_XML = """""" + +ISE_PREP_SNAPSHOT_RESP =\ + {'status': 202, + 'location': ISE_SNAPSHOT_LOCATION_URL, + 'content': " ".join(ISE_PREP_SNAPSHOT_XML.split())} + +ISE_MODIFY_VOLUME_XML = """""" + +ISE_MODIFY_VOLUME_RESP =\ + {'status': 201, 
+ 'location': ISE_VOLUME1_LOCATION_URL, + 'content': " ".join(ISE_MODIFY_VOLUME_XML.split())} + +ISE_MODIFY_HOST_XML = """""" + +ISE_MODIFY_HOST_RESP =\ + {'status': 201, + 'location': ISE_HOST_LOCATION_URL, + 'content': " ".join(ISE_MODIFY_HOST_XML.split())} + +ISE_BAD_CONNECTION_RESP =\ + {'status': 0, + 'location': '', + 'content': " "} + +ISE_400_RESP =\ + {'status': 400, + 'location': '', + 'content': ""} + +ISE_GET_VOL_STATUS_404_XML = \ + """VOLUME not found.""" + +ISE_GET_VOL_STATUS_404_RESP =\ + {'status': 404, + 'location': '', + 'content': " ".join(ISE_GET_VOL_STATUS_404_XML.split())} + +ISE_400_INVALID_STATE_XML = \ + """Not in a valid state.""" + +ISE_400_INVALID_STATE_RESP =\ + {'status': 400, + 'location': '', + 'content': " ".join(ISE_400_INVALID_STATE_XML.split())} + +ISE_409_CONFLICT_XML = \ + """Conflict""" + +ISE_409_CONFLICT_RESP =\ + {'status': 409, + 'location': '', + 'content': " ".join(ISE_409_CONFLICT_XML.split())} + + +DRIVER = "cinder.volume.drivers.xio.XIOISEDriver" + + +@mock.patch(DRIVER + "._opener", autospec=True) +class XIOISEDriverTestCase(object): + + # Test cases for X-IO volume driver + + def setUp(self): + super(XIOISEDriverTestCase, self).setUp() + + # set good default values + self.configuration = mock.Mock() + self.configuration.san_ip = ISE_IP1 + self.configuration.san_user = 'fakeuser' + self.configuration.san_password = 'fakepass' + self.configuration.iscsi_ip_address = ISE_ISCSI_IP1 + self.configuration.driver_use_ssl = False + self.configuration.ise_completion_retries = 30 + self.configuration.ise_connection_retries = 5 + self.configuration.ise_retry_interval = 1 + self.configuration.volume_backend_name = 'ise1' + self.driver = None + self.protocol = '' + self.connector = None + self.connection_failures = 0 + self.hostgid = '' + self.use_response_table = 1 + + def setup_test(self, protocol): + self.protocol = protocol + + # set good default values + if self.protocol == 'iscsi': + self.configuration.ise_protocol = 
protocol + self.connector = ISCSI_CONN1 + self.hostgid = self.connector['initiator'] + elif self.protocol == 'fibre_channel': + self.configuration.ise_protocol = protocol + self.connector = FC_CONN1 + self.hostgid = self.connector['wwpns'][0] + + def setup_driver(self): + # this setups up driver object with previously set configuration values + if self.configuration.ise_protocol == 'iscsi': + self.driver =\ + xio.XIOISEISCSIDriver(configuration=self.configuration) + elif self.configuration.ise_protocol == 'fibre_channel': + self.driver =\ + xio.XIOISEFCDriver(configuration=self.configuration) + elif self.configuration.ise_protocol == 'test_prot': + # if test_prot specified override with correct protocol + # used to bypass protocol specific driver + self.configuration.ise_protocol = self.protocol + self.driver = xio.XIOISEDriver(configuration=self.configuration) + else: + # Invalid protocol type + raise exception.Invalid() + +################################# +# UNIT TESTS # +################################# + def test_do_setup(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) + self.driver.do_setup(None) + + def test_negative_do_setup_no_clone_support(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_NO_CLONE_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.do_setup, None) + + def test_negative_do_setup_no_capabilities(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_NO_CAP_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.do_setup, None) + + def test_negative_do_setup_no_ctrl(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_NO_CTRL_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.do_setup, None) + + def test_negative_do_setup_no_ipaddress(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_NO_IP_RESP]) + 
self.driver.do_setup(None) + + def test_negative_do_setup_bad_globalid_none(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_NO_GID_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.do_setup, None) + + def test_check_for_setup_error(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) + self.setup_driver() + self.driver.check_for_setup_error() + + def test_negative_do_setup_bad_ip(self, mock_req): + # set san_ip to bad value + self.configuration.san_ip = '' + mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.check_for_setup_error) + + def test_negative_do_setup_bad_user_blank(self, mock_req): + # set san_user to bad value + self.configuration.san_login = '' + mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.check_for_setup_error) + + def test_negative_do_setup_bad_password_blank(self, mock_req): + # set san_password to bad value + self.configuration.san_password = '' + mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.check_for_setup_error) + + def test_get_volume_stats(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_STORAGE_POOLS_RESP]) + + backend_name = self.configuration.volume_backend_name + if self.configuration.ise_protocol == 'iscsi': + protocol = 'iSCSI' + else: + protocol = 'fibre_channel' + exp_result = {} + exp_result = {'vendor_name': "X-IO", + 'driver_version': "1.1.2", + 'volume_backend_name': backend_name, + 'reserved_percentage': 0, + 'total_capacity_gb': 100, + 'free_capacity_gb': 60, + 'QoS_support': True, + 'affinity': True, + 'thin': False, + 'pools': [{'pool_ise_name': "Pool 1", + 'pool_name': "1", + 'status': "Operational", + 'status_details': "None", + 
'free_capacity_gb': 60, + 'free_capacity_gb_raid_0': 60, + 'free_capacity_gb_raid_1': 30, + 'free_capacity_gb_raid_5': 45, + 'allocated_capacity_gb': 40, + 'allocated_capacity_gb_raid_0': 0, + 'allocated_capacity_gb_raid_1': 40, + 'allocated_capacity_gb_raid_5': 0, + 'health': 100, + 'media': "Hybrid", + 'total_capacity_gb': 100, + 'QoS_support': True, + 'reserved_percentage': 0}], + 'active_volumes': 2, + 'storage_protocol': protocol} + + act_result = self.driver.get_volume_stats(True) + self.assertDictMatch(exp_result, act_result) + + def test_get_volume_stats_ssl(self, mock_req): + self.configuration.driver_use_ssl = True + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_STORAGE_POOLS_RESP]) + self.driver.get_volume_stats(True) + + def test_negative_get_volume_stats_bad_primary(self, mock_req): + self.configuration.ise_connection_retries = 1 + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_BAD_CONNECTION_RESP, + ISE_GET_STORAGE_POOLS_RESP]) + self.driver.get_volume_stats(True) + + def test_create_volume(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + VOLUME1['volume_type_id'] = type_ref['id'] + self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_CREATE_VOLUME_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_GET_IONETWORKS_RESP]) + exp_result = {} + exp_result = {"provider_auth": ""} + act_result = self.driver.create_volume(VOLUME1) + self.assertDictMatch(exp_result, act_result) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = 
iter([ISE_GET_QUERY_RESP, + ISE_CREATE_VOLUME_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.driver.create_volume(VOLUME1) + + def test_create_volume_chap(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + VOLUME1['volume_type_id'] = type_ref['id'] + self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_CREATE_VOLUME_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_GET_IONETWORKS_CHAP_RESP]) + exp_result = {} + exp_result = {"provider_auth": "CHAP abc abc"} + act_result = self.driver.create_volume(VOLUME1) + self.assertDictMatch(exp_result, act_result) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_CREATE_VOLUME_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.driver.create_volume(VOLUME1) + + def test_create_volume_type_none(self, mock_req): + self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_CREATE_VOLUME_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_GET_IONETWORKS_RESP]) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_CREATE_VOLUME_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.driver.create_volume(VOLUME3) + + def test_delete_volume(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_DELETE_ALLOC_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_DELETE_VOLUME_RESP]) + self.setup_driver() + self.driver.delete_volume(VOLUME1) + + def test_delete_volume_none_existing(self, mock_req): + 
mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_DELETE_ALLOC_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.setup_driver() + self.driver.delete_volume(VOLUME2) + + def test_initialize_connection_positive(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST2_RESP, + ISE_CREATE_HOST_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_CREATE_ALLOC_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_GET_CONTROLLERS_RESP]) + self.setup_driver() + + exp_result = {} + if self.configuration.ise_protocol == 'iscsi': + exp_result = {"driver_volume_type": "iscsi", + "data": {"target_lun": '1', + "volume_id": '1', + "access_mode": 'rw', + "target_discovered": False, + "target_iqn": ISE_IQN, + "target_portal": ISE_ISCSI_IP1 + ":3260"}} + elif self.configuration.ise_protocol == 'fibre_channel': + exp_result = {"driver_volume_type": "fibre_channel", + "data": {"target_lun": '1', + "volume_id": '1', + "access_mode": 'rw', + "target_discovered": True, + "initiator_target_map": ISE_INIT_TARGET_MAP, + "target_wwn": ISE_TARGETS}} + + act_result =\ + self.driver.initialize_connection(VOLUME1, self.connector) + self.assertDictMatch(exp_result, act_result) + + def test_initialize_connection_positive_host_type(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP, + ISE_MODIFY_HOST_RESP, + ISE_CREATE_ALLOC_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_GET_CONTROLLERS_RESP]) + self.setup_driver() + + exp_result = {} + if self.configuration.ise_protocol == 'iscsi': + exp_result = {"driver_volume_type": "iscsi", + "data": {"target_lun": '1', + "volume_id": '1', + "access_mode": 'rw', + "target_discovered": False, + "target_iqn": ISE_IQN, + "target_portal": ISE_ISCSI_IP1 + ":3260"}} + elif self.configuration.ise_protocol == 'fibre_channel': + exp_result = {"driver_volume_type": "fibre_channel", + "data": {"target_lun": '1', + "volume_id": '1', + "access_mode": 'rw', + "target_discovered": True, + 
"initiator_target_map": ISE_INIT_TARGET_MAP, + "target_wwn": ISE_TARGETS}} + + act_result =\ + self.driver.initialize_connection(VOLUME1, self.connector) + self.assertDictMatch(exp_result, act_result) + + def test_initialize_connection_positive_chap(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST2_RESP, + ISE_CREATE_HOST_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_CREATE_ALLOC_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_GET_CONTROLLERS_RESP]) + self.setup_driver() + exp_result = {} + if self.configuration.ise_protocol == 'iscsi': + exp_result = {"driver_volume_type": "iscsi", + "data": {"target_lun": '1', + "volume_id": '2', + "access_mode": 'rw', + "target_discovered": False, + "target_iqn": ISE_IQN, + "target_portal": ISE_ISCSI_IP1 + ":3260", + 'auth_method': 'CHAP', + 'auth_username': 'abc', + 'auth_password': 'abc'}} + elif self.configuration.ise_protocol == 'fibre_channel': + exp_result = {"driver_volume_type": "fibre_channel", + "data": {"target_lun": '1', + "volume_id": '2', + "access_mode": 'rw', + "target_discovered": True, + "initiator_target_map": ISE_INIT_TARGET_MAP, + "target_wwn": ISE_TARGETS}} + + act_result =\ + self.driver.initialize_connection(VOLUME2, self.connector) + self.assertDictMatch(exp_result, act_result) + + def test_initialize_connection_negative_no_host(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST2_RESP, + ISE_CREATE_HOST_RESP, + ISE_GET_HOSTS_HOST2_RESP]) + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.initialize_connection, + VOLUME2, self.connector) + + def test_initialize_connection_negative_host_type(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP, + ISE_400_RESP]) + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.initialize_connection, + VOLUME2, self.connector) + + def test_terminate_connection_positive(self, mock_req): + 
self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_DELETE_ALLOC_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_DELETE_ALLOC_RESP]) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_DELETE_ALLOC_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_GET_CONTROLLERS_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_DELETE_ALLOC_RESP]) + self.driver.terminate_connection(VOLUME1, self.connector) + + def test_terminate_connection_positive_noalloc(self, mock_req): + self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, + ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_DELETE_ALLOC_RESP]) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, + ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, + ISE_GET_CONTROLLERS_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_DELETE_ALLOC_RESP]) + self.driver.terminate_connection(VOLUME1, self.connector) + + def test_negative_terminate_connection_bad_host(self, mock_req): + self.setup_driver() + test_connector = {} + if self.configuration.ise_protocol == 'iscsi': + test_connector['initiator'] = 'bad_iqn' + test_connector['host'] = '' + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_RESP]) + elif self.configuration.ise_protocol == 'fibre_channel': + test_connector['wwpns'] = 'bad_wwn' + test_connector['host'] = '' + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_HOSTS_HOST1_RESP, + ISE_GET_CONTROLLERS_RESP]) + + self.driver.terminate_connection(VOLUME1, test_connector) + + def test_create_snapshot(self, 
mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + SNAPSHOT1['volume_type_id'] = type_ref['id'] + + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_PREP_SNAPSHOT_RESP, + ISE_GET_SNAP1_STATUS_RESP, + ISE_CREATE_SNAPSHOT_RESP, + ISE_GET_SNAP1_STATUS_RESP]) + self.setup_driver() + self.driver.create_snapshot(SNAPSHOT1) + + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + def test_negative_create_snapshot_invalid_state_recover(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + SNAPSHOT1['volume_type_id'] = type_ref['id'] + + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_400_INVALID_STATE_RESP, + ISE_PREP_SNAPSHOT_RESP, + ISE_GET_SNAP1_STATUS_RESP, + ISE_CREATE_SNAPSHOT_RESP, + ISE_GET_SNAP1_STATUS_RESP]) + self.setup_driver() + self.driver.create_snapshot(SNAPSHOT1) + + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=utils.ZeroIntervalLoopingCall) + def test_negative_create_snapshot_invalid_state_norecover(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + 
type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + SNAPSHOT1['volume_type_id'] = type_ref['id'] + + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_400_INVALID_STATE_RESP, + ISE_400_INVALID_STATE_RESP, + ISE_400_INVALID_STATE_RESP, + ISE_400_INVALID_STATE_RESP, + ISE_400_INVALID_STATE_RESP]) + self.configuration.ise_completion_retries = 5 + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.create_snapshot, SNAPSHOT1) + + def test_negative_create_snapshot_conflict(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + SNAPSHOT1['volume_type_id'] = type_ref['id'] + + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_409_CONFLICT_RESP]) + self.configuration.ise_completion_retries = 1 + self.setup_driver() + self.assertRaises(exception.XIODriverException, + self.driver.create_snapshot, SNAPSHOT1) + + def test_delete_snapshot(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_ALLOC_WITH_EP_RESP, + ISE_DELETE_ALLOC_RESP, + ISE_GET_SNAP1_STATUS_RESP, + ISE_DELETE_VOLUME_RESP]) + self.setup_driver() + self.driver.delete_snapshot(SNAPSHOT1) + + def test_clone_volume(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 
'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + VOLUME1['volume_type_id'] = type_ref['id'] + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_PREP_SNAPSHOT_RESP, + ISE_GET_SNAP1_STATUS_RESP, + ISE_CREATE_SNAPSHOT_RESP, + ISE_GET_SNAP1_STATUS_RESP]) + self.setup_driver() + self.driver.create_cloned_volume(CLONE1, VOLUME1) + + def test_extend_volume(self, mock_req): + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_MODIFY_VOLUME_RESP]) + self.setup_driver() + self.driver.extend_volume(VOLUME1, NEW_VOLUME_SIZE) + + def test_retype_volume(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + VOLUME1['volume_type_id'] = type_ref['id'] + # New volume type + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "5", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT2', extra_specs) + specs = {'qos:minIOPS': '30', + 'qos:maxIOPS': '3000', + 'qos:burstIOPS': '10000'} + qos = qos_specs.create(ctxt, 'fake-qos2', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_MODIFY_VOLUME_RESP]) + self.setup_driver() + self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0) + + def test_create_volume_from_snapshot(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + 
"Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + SNAPSHOT1['volume_type_id'] = type_ref['id'] + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_SNAP1_STATUS_RESP, + ISE_PREP_SNAPSHOT_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_CREATE_SNAPSHOT_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.setup_driver() + self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1) + + def test_manage_existing(self, mock_req): + ctxt = context.get_admin_context() + extra_specs = {"Feature:Pool": "1", + "Feature:Raid": "1", + "Affinity:Type": "flash", + "Alloc:Type": "thick"} + type_ref = volume_types.create(ctxt, 'VT1', extra_specs) + specs = {'qos:minIOPS': '20', + 'qos:maxIOPS': '2000', + 'qos:burstIOPS': '5000'} + qos = qos_specs.create(ctxt, 'fake-qos', specs) + qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) + VOLUME1['volume_type_id'] = type_ref['id'] + self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_MODIFY_VOLUME_RESP, + ISE_GET_IONETWORKS_RESP]) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_MODIFY_VOLUME_RESP]) + self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'}) + + def test_manage_existing_no_source_name(self, mock_req): + self.setup_driver() + if self.configuration.ise_protocol == 'iscsi': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + ISE_MODIFY_VOLUME_RESP, + ISE_GET_IONETWORKS_RESP]) + elif self.configuration.ise_protocol == 'fibre_channel': + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP, + 
ISE_MODIFY_VOLUME_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.manage_existing, VOLUME1, {}) + + def test_manage_existing_get_size(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP]) + exp_result = 10 + act_result = \ + self.driver.manage_existing_get_size(VOLUME1, + {'source-name': 'a'}) + self.assertEqual(exp_result, act_result) + + def test_manage_existing_get_size_no_source_name(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.manage_existing_get_size, VOLUME1, {}) + + def test_unmanage(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL1_STATUS_RESP]) + self.driver.unmanage(VOLUME1) + + def test_negative_unmanage_no_volume_status_xml(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL_STATUS_NO_STATUS_RESP]) + self.driver.unmanage(VOLUME1) + + def test_negative_unmanage_no_volume_xml(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.unmanage, VOLUME1) + + def test_negative_unmanage_non_existing_volume(self, mock_req): + self.setup_driver() + mock_req.side_effect = iter([ISE_GET_QUERY_RESP, + ISE_GET_VOL_STATUS_404_RESP]) + self.assertRaises(exception.XIODriverException, + self.driver.unmanage, VOLUME1) + + +class XIOISEISCSIDriverTestCase(XIOISEDriverTestCase, test.TestCase): + + def setUp(self): + super(XIOISEISCSIDriverTestCase, self).setUp() + self.setup_test('iscsi') + + +class XIOISEFCDriverTestCase(XIOISEDriverTestCase, test.TestCase): + + def setUp(self): + super(XIOISEFCDriverTestCase, self).setUp() + self.setup_test('fibre_channel') diff --git a/cinder/volume/drivers/xio.py 
b/cinder/volume/drivers/xio.py new file mode 100644 index 000000000..6dd693d07 --- /dev/null +++ b/cinder/volume/drivers/xio.py @@ -0,0 +1,1605 @@ +# Copyright (c) 2014 X-IO. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import string + +from lxml import etree +from oslo_config import cfg +from oslo_log import log as logging +from oslo_service import loopingcall +from six.moves import urllib + +from cinder import context +from cinder import exception +from cinder.i18n import _LE, _LI, _LW +from cinder.volume import driver +from cinder.volume.drivers.san import san +from cinder.volume import qos_specs +from cinder.volume import volume_types +from cinder.zonemanager import utils as fczm_utils + +XIO_OPTS = [ + cfg.IntOpt('ise_storage_pool', default=1, + help='Default storage pool for volumes.'), + cfg.IntOpt('ise_raid', default=1, + help='Raid level for ISE volumes.'), + cfg.IntOpt('ise_connection_retries', default=5, + help='Number of retries (per port) when establishing ' + 'connection to ISE management port.'), + cfg.IntOpt('ise_retry_interval', default=1, + help='Interval (secs) between retries.'), + cfg.IntOpt('ise_completion_retries', default=30, + help='Number on retries to get completion status after ' + 'issuing a command to ISE.'), +] + + +CONF = cfg.CONF +CONF.register_opts(XIO_OPTS) + +LOG = logging.getLogger(__name__) + +OPERATIONAL_STATUS = 'OPERATIONAL' +PREPARED_STATUS = 'PREPARED' +INVALID_STATUS = 'VALID' + + 
+# Raise exception for X-IO driver +def RaiseXIODriverException(): + raise exception.XIODriverException() + + +class XIOISEDriver(object): + + VERSION = '1.1.2' + + # Version Changes + # 1.0.0 Base driver + # 1.1.0 QoS, affinity, retype and thin support + # 1.1.1 Fix retry loop (Bug 1429283) + # 1.1.2 Fix host object deletion (Bug 1433450). + + def __init__(self, *args, **kwargs): + super(XIOISEDriver, self).__init__() + LOG.debug("XIOISEDriver __init__ called.") + self.configuration = kwargs.get('configuration', None) + self.ise_primary_ip = '' + self.ise_secondary_ip = '' + self.newquery = 1 + self.ise_globalid = None + self._vol_stats = {} + + def do_setup(self, context): + LOG.debug("XIOISEDriver do_setup called.") + self._get_ise_globalid() + + def check_for_setup_error(self): + LOG.debug("XIOISEDriver check_for_setup_error called.") + # The san_ip must always be set + if self.configuration.san_ip == "": + LOG.error(_LE("san ip must be configured!")) + RaiseXIODriverException() + # The san_login must always be set + if self.configuration.san_login == "": + LOG.error(_LE("san_login must be configured!")) + RaiseXIODriverException() + # The san_password must always be set + if self.configuration.san_password == "": + LOG.error(_LE("san_password must be configured!")) + RaiseXIODriverException() + return + + def _get_version(self): + """Return driver version.""" + return self.VERSION + + def _send_query(self): + """Do initial query to populate ISE global id.""" + body = '' + url = '/query' + resp = self._connect('GET', url, body) + status = resp['status'] + if status != 200: + # unsuccessful - this is fatal as we need the global id + # to build REST requests. + LOG.error(_LE("Array query failed - No response (%d)!"), status) + RaiseXIODriverException() + # Successfully fetched QUERY info. Parse out globalid along with + # ipaddress for Controller 1 and Controller 2. 
We assign primary + # ipaddress to use based on controller rank + xml_tree = etree.fromstring(resp['content']) + # first check that the ISE is running a supported FW version + support = {} + support['thin'] = False + support['clones'] = False + support['thin-clones'] = False + self.configuration.ise_affinity = False + self.configuration.ise_qos = False + capabilities = xml_tree.find('capabilities') + if capabilities is None: + LOG.error(_LE("Array query failed. No capabilities in response!")) + RaiseXIODriverException() + for node in capabilities: + if node.tag != 'capability': + continue + capability = node + if capability.attrib['value'] == '49003': + self.configuration.ise_affinity = True + elif capability.attrib['value'] == '49004': + self.configuration.ise_qos = True + elif capability.attrib['value'] == '49005': + support['thin'] = True + elif capability.attrib['value'] == '49006': + support['clones'] = True + elif capability.attrib['value'] == '49007': + support['thin-clones'] = True + # Make sure ISE support necessary features + if not support['clones']: + LOG.error(_LE("ISE FW version is not compatible with Openstack!")) + RaiseXIODriverException() + # set up thin provisioning support + self.configuration.san_thin_provision = support['thin-clones'] + # Fill in global id, primary and secondary ip addresses + globalid = xml_tree.find('globalid') + if globalid is None: + LOG.error(_LE("Array query failed. No global id in XML response!")) + RaiseXIODriverException() + self.ise_globalid = globalid.text + controllers = xml_tree.find('controllers') + if controllers is None: + LOG.error(_LE("Array query failed. 
No controllers in response!")) + RaiseXIODriverException() + for node in controllers: + if node.tag != 'controller': + continue + # found a controller node + controller = node + ipaddress = controller.find('ipaddress') + ranktag = controller.find('rank') + if ipaddress is None: + continue + # found an ipaddress tag + # make sure rank tag is present + if ranktag is None: + continue + rank = ranktag.attrib['value'] + # make sure rank value is present + if rank is None: + continue + if rank == '1': + # rank 1 means primary (xo) + self.ise_primary_ip = ipaddress.text + LOG.debug('Setting primary IP to: %s.', + self.ise_primary_ip) + elif rank == '0': + # rank 0 means secondary (nxo) + self.ise_secondary_ip = ipaddress.text + LOG.debug('Setting secondary IP to: %s.', + self.ise_secondary_ip) + # clear out new query request flag on successful fetch of QUERY info. + self.newquery = 0 + return support + + def _get_ise_globalid(self): + """Return ISE globalid.""" + if self.ise_globalid is None or self.newquery == 1: + # this call will populate globalid + self._send_query() + if self.ise_globalid is None: + LOG.error(_LE("ISE globalid not set!")) + RaiseXIODriverException() + return self.ise_globalid + + def _get_ise_primary_ip(self): + """Return Primary IP address to REST API.""" + if self.ise_primary_ip == '': + # Primary IP is set to ISE IP passed in from cinder.conf + self.ise_primary_ip = self.configuration.san_ip + if self.ise_primary_ip == '': + # No IP - fatal. 
+ LOG.error(_LE("Primary IP must be set!")) + RaiseXIODriverException() + return self.ise_primary_ip + + def _get_ise_secondary_ip(self): + """Return Secondary IP address to REST API.""" + if self.ise_secondary_ip != '': + return self.ise_secondary_ip + + def _get_uri_prefix(self): + """Returns prefix in form of http(s)://1.2.3.4""" + prefix = '' + # figure out if http or https should be used + if self.configuration.driver_use_ssl: + prefix = 'https://' + else: + prefix = 'http://' + # add the IP address + prefix += self._get_ise_primary_ip() + return prefix + + def _opener(self, method, url, body, header): + """Wrapper to handle connection""" + response = {} + response['status'] = 0 + response['content'] = '' + response['location'] = '' + # send the request + req = urllib.request.Request(url, body, header) + # Override method to allow GET, PUT, POST, DELETE + req.get_method = lambda: method + try: + resp = urllib.request.urlopen(req) + except urllib.error.HTTPError as err: + # HTTP error. Return HTTP status and content and let caller + # handle retries. + response['status'] = err.code + response['content'] = err.read() + except urllib.error.URLError as err: + # Connection failure. Return a status of 0 to indicate error. + response['status'] = 0 + else: + # Successful. Return status code, content, + # and location header, if present. + response['status'] = resp.getcode() + response['content'] = resp.read() + response['location'] = \ + resp.info().getheader('Content-Location', '') + return response + + def _help_call_method(self, args, retry_count): + """Helper function used for prepare clone and delete REST calls.""" + # This function calls request method and URL and checks the response. + # Certain cases allows for retries, while success and fatal status + # will fall out and tell parent to break out of loop. 
+ # initialize remaining to one less than retries + remaining = retry_count + resp = self._send_cmd(args['method'], args['url'], args['arglist']) + status = resp['status'] + if (status == 400): + reason = '' + if 'content' in resp: + reason = etree.fromstring(resp['content']) + if reason is not None: + reason = string.upper(reason.text) + if INVALID_STATUS in reason: + # Request failed with an invalid state. This can be because + # source volume is in a temporary unavailable state. + LOG.debug('REST call failed with invalid state: ' + '%(method)s - %(status)d - %(reason)s', + {'method': args['method'], + 'status': status, 'reason': reason}) + # Let parent check retry eligibility based on remaining retries + remaining -= 1 + else: + # Fatal error. Set remaining to 0 to make caller exit loop. + remaining = 0 + else: + # set remaining to 0 to make caller exit loop + # original waiter will handle the difference between success and + # fatal error based on resp['status']. + remaining = 0 + return (remaining, resp) + + def _help_call_opener(self, args, retry_count): + """Helper function to call _opener.""" + # This function calls _opener func and checks the response. + # If response is 0 it will decrement the remaining retry count. + # On successful connection it will set remaining to 0 to signal + # parent to break out of loop. + remaining = retry_count + response = self._opener(args['method'], args['url'], + args['body'], args['header']) + if response['status'] != 0: + # We are done + remaining = 0 + else: + # Let parent check retry eligibility based on remaining retries. + remaining -= 1 + # Return remaining and response + return (remaining, response) + + def _help_wait_for_status(self, args, retry_count): + """Helper function to wait for specified volume status""" + # This function calls _get_volume_info and checks the response. + # If the status strings do not match the specified status it will + # return the remaining retry count decremented by one. 
+ # On successful match it will set remaining to 0 to signal + # parent to break out of loop. + remaining = retry_count + info = self._get_volume_info(args['name']) + status = args['status_string'] + if (status in info['string'] or status in info['details']): + remaining = 0 + else: + # Let parent check retry eligibility based on remaining retries. + remaining -= 1 + # return remaining and volume info + return (remaining, info) + + def _wait_for_completion(self, help_func, args, retry_count): + """Helper function to wait for completion of passed function""" + # Helper call loop function. + def _call_loop(loop_args): + remaining = loop_args['retries'] + args = loop_args['args'] + LOG.debug("In call loop (%(remaining)d) %(args)s", + {'remaining': remaining, 'args': args}) + (remaining, response) = loop_args['func'](args, remaining) + if remaining == 0: + # We are done - let our caller handle response + raise loopingcall.LoopingCallDone(response) + loop_args['retries'] = remaining + + # Setup retries, interval and call wait function. + loop_args = {} + loop_args['retries'] = retry_count + loop_args['func'] = help_func + loop_args['args'] = args + interval = self.configuration.ise_retry_interval + timer = loopingcall.FixedIntervalLoopingCall(_call_loop, loop_args) + return timer.start(interval).wait() + + def _connect(self, method, uri, body=''): + """Set up URL and HTML and call _opener to make request""" + url = '' + # see if we need to add prefix + # this call will force primary ip to be filled in as well + prefix = self._get_uri_prefix() + if prefix not in uri: + url = prefix + url += uri + # set up headers for XML and Auth + header = {'Content-Type': 'application/xml; charset=utf-8'} + auth_key =\ + base64.encodestring('%s:%s' % + (self.configuration.san_login, + self.configuration.san_password))[:-1] + header['Authorization'] = 'Basic %s' % auth_key + # We allow 5 retries on each IP address. If connection to primary + # fails, secondary will be tried. 
If connection to secondary is + # successful, the request flag for a new QUERY will be set. The QUERY + # will be sent on next connection attempt to figure out which + # controller is primary in case it has changed. + LOG.debug("Connect: %(method)s %(url)s %(body)s", + {'method': method, 'url': url, 'body': body}) + using_secondary = 0 + response = {} + response['status'] = 0 + response['location'] = '' + response['content'] = '' + primary_ip = self._get_ise_primary_ip() + secondary_ip = self._get_ise_secondary_ip() + # This will first try connecting to primary IP and then secondary IP. + args = {} + args['method'] = method + args['url'] = url + args['body'] = body + args['header'] = header + retries = self.configuration.ise_connection_retries + while True: + response = self._wait_for_completion(self._help_call_opener, + args, retries) + if response['status'] != 0: + # Connection succeeded. Request new query on next connection + # attempt if we used secondary ip to sort out who should be + # primary going forward + self.newquery = using_secondary + return response + # connection failed - check if we have any retries left + if using_secondary == 0: + # connection on primary ip failed + # try secondary ip + if secondary_ip is '': + # if secondary is not setup yet, then assert + # connection on primary and secondary ip failed + LOG.error(_LE("Connection to %s failed and no secondary!"), + primary_ip) + RaiseXIODriverException() + # swap primary for secondary ip in URL + url = string.replace(url, primary_ip, secondary_ip) + LOG.debug('Trying secondary IP URL: %s', url) + using_secondary = 1 + continue + # connection failed on both IPs - break out of the loop + break + # connection on primary and secondary ip failed + LOG.error(_LE("Could not connect to %(primary)s or %(secondary)s!"), + {'primary': primary_ip, 'secondary': secondary_ip}) + RaiseXIODriverException() + + def _param_string(self, params): + """Turn (name, value) pairs into single param string""" + 
param_str = [] + for name, value in params.items(): + if value != '': + param_str.append("%s=%s" % (name, value)) + return '&'.join(param_str) + + def _send_cmd(self, method, url, params): + """Prepare HTTP request and call _connect""" + # Add params to appropriate field based on method + body = '' + if method == 'GET': + if params != {}: + url += '?' + self._param_string(params) + body = '' + elif method == 'POST': + body = self._param_string(params) + elif method == 'DELETE': + body = '' + elif method == 'PUT': + if params != {}: + url += '?' + self._param_string(params) + # ISE REST API is mostly synchronous but has some asynchronous + # streaks. Add retries to work around design of ISE REST API that + # does not allow certain operations to be in process concurrently. + # This is only an issue if lots of CREATE/DELETE/SNAPSHOT/CLONE ops + # are issued in short order. + return self._connect(method, url, body) + + def find_target_chap(self): + """Return target CHAP settings""" + chap = {} + chap['chap_user'] = '' + chap['chap_passwd'] = '' + url = '/storage/arrays/%s/ionetworks' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, {}) + status = resp['status'] + if status != 200: + LOG.warning(_LW("IOnetworks GET failed (%d)"), status) + return chap + # Got a good response. Parse out CHAP info. First check if CHAP is + # enabled and if so parse out username and password. + root = etree.fromstring(resp['content']) + for element in root.iter(): + if element.tag != 'chap': + continue + chapin = element.find('chapin') + if chapin is None: + continue + if chapin.attrib['value'] != '1': + continue + # CHAP is enabled. 
Store username / pw + chap_user = chapin.find('username') + if chap_user is not None: + chap['chap_user'] = chap_user.text + chap_passwd = chapin.find('password') + if chap_passwd is not None: + chap['chap_passwd'] = chap_passwd.text + break + return chap + + def find_target_iqn(self, iscsi_ip): + """Find Target IQN string""" + url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, {}) + status = resp['status'] + if status != 200: + # Not good. Throw an exception. + LOG.error(_LE("Controller GET failed (%d)"), status) + RaiseXIODriverException() + # Good response. Parse out IQN that matches iscsi_ip_address + # passed in from cinder.conf. IQN is 'hidden' in globalid field. + root = etree.fromstring(resp['content']) + for element in root.iter(): + if element.tag != 'ioport': + continue + ipaddrs = element.find('ipaddresses') + if ipaddrs is None: + continue + for ipaddr in ipaddrs.iter(): + # Look for match with iscsi_ip_address + if ipaddr is None or ipaddr.text != iscsi_ip: + continue + endpoint = element.find('endpoint') + if endpoint is None: + continue + global_id = endpoint.find('globalid') + if global_id is None: + continue + target_iqn = global_id.text + if target_iqn != '': + return target_iqn + # Did not find a matching IQN. Upsetting. + LOG.error(_LE("Failed to get IQN!")) + RaiseXIODriverException() + + def find_target_wwns(self): + """Return target WWN""" + # Let's look for WWNs + target_wwns = [] + target = '' + url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, {}) + status = resp['status'] + if status != 200: + # Not good. Throw an exception. + LOG.error(_LE("Controller GET failed (%d)"), status) + RaiseXIODriverException() + # Good response. Parse out globalid (WWN) of endpoint that matches + # protocol and type (array). 
+ controllers = etree.fromstring(resp['content']) + for controller in controllers.iter(): + if controller.tag != 'controller': + continue + fcports = controller.find('fcports') + if fcports is None: + continue + for fcport in fcports: + if fcport.tag != 'fcport': + continue + wwn_tag = fcport.find('wwn') + if wwn_tag is None: + continue + target = wwn_tag.text + target_wwns.append(target) + return target_wwns + + def _find_target_lun(self, location): + """Return LUN for allocation specified in location string""" + resp = self._send_cmd('GET', location, {}) + status = resp['status'] + if status != 200: + # Not good. Throw an exception. + LOG.error(_LE("Failed to get allocation information (%d)!"), + status) + RaiseXIODriverException() + # Good response. Parse out LUN. + xml_tree = etree.fromstring(resp['content']) + allocation = xml_tree.find('allocation') + if allocation is not None: + luntag = allocation.find('lun') + if luntag is not None: + return luntag.text + # Did not find LUN. Throw an exception. + LOG.error(_LE("Failed to get LUN information!")) + RaiseXIODriverException() + + def _get_volume_info(self, vol_name): + """Return status of ISE volume""" + vol_info = {} + vol_info['value'] = '' + vol_info['string'] = '' + vol_info['details'] = '' + vol_info['location'] = '' + vol_info['size'] = '' + # Attempt to collect status value, string and details. Also pick up + # location string from response. Location is used in REST calls + # DELETE/SNAPSHOT/CLONE. + # We ask for specific volume, so response should only contain one + # volume entry. + url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, {'name': vol_name}) + if resp['status'] != 200: + LOG.warning(_LW("Could not get status for %(name)s (%(status)d)."), + {'name': vol_name, 'status': resp['status']}) + return vol_info + # Good response. Parse down to Volume tag in list of one. 
+ root = etree.fromstring(resp['content']) + volume_node = root.find('volume') + if volume_node is None: + LOG.warning(_LW("No volume node in XML content.")) + return vol_info + # Location can be found as an attribute in the volume node tag. + vol_info['location'] = volume_node.attrib['self'] + # Find status tag + status = volume_node.find('status') + if status is None: + LOG.warning(_LW("No status payload for volume %s."), vol_name) + return vol_info + # Fill in value and string from status tag attributes. + vol_info['value'] = status.attrib['value'] + vol_info['string'] = string.upper(status.attrib['string']) + # Detailed status has it's own list of tags. + details = status.find('details') + if details is not None: + detail = details.find('detail') + if detail is not None: + vol_info['details'] = string.upper(detail.text) + # Get volume size + size_tag = volume_node.find('size') + if size_tag is not None: + vol_info['size'] = size_tag.text + # Return value, string, details and location. + return vol_info + + def _alloc_location(self, volume, hostname, delete=0): + """Find location string for allocation. Also delete alloc per reqst""" + location = '' + url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, {'name': volume['name'], + 'hostname': hostname}) + if resp['status'] != 200: + LOG.error(_LE("Could not GET allocation information (%d)!"), + resp['status']) + RaiseXIODriverException() + # Good response. Find the allocation based on volume name. 
+ allocation_tree = etree.fromstring(resp['content']) + for allocation in allocation_tree.iter(): + if allocation.tag != 'allocation': + continue + # verify volume name match + volume_tag = allocation.find('volume') + if volume_tag is None: + continue + volumename_tag = volume_tag.find('volumename') + if volumename_tag is None: + continue + volumename = volumename_tag.text + if volumename != volume['name']: + continue + # verified volume name match + # find endpoints list + endpoints = allocation.find('endpoints') + if endpoints is None: + continue + # Found endpoints list. Found matching host if hostname specified, + # otherwise any host is a go. This is used by the caller to + # delete all allocations (presentations) to a volume. + for endpoint in endpoints.iter(): + if hostname != '': + hname_tag = endpoint.find('hostname') + if hname_tag is None: + continue + if string.upper(hname_tag.text) != string.upper(hostname): + continue + # Found hostname match. Location string is an attribute in + # allocation tag. + location = allocation.attrib['self'] + # Delete allocation if requested. + if delete == 1: + self._send_cmd('DELETE', location, {}) + location = '' + break + else: + return location + return location + + def _present_volume(self, volume, hostname, lun): + """Present volume to host at specified LUN""" + # Set up params with volume name, host name and target lun, if + # specified. + target_lun = lun + params = {} + params = {'volumename': volume['name'], + 'hostname': hostname} + # Fill in LUN if specified. + if target_lun != '': + params['lun'] = target_lun + # Issue POST call to allocation. 
+ url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) + resp = self._send_cmd('POST', url, params) + status = resp['status'] + if status == 201: + LOG.info(_LI("Volume %s presented."), volume['name']) + elif status == 409: + LOG.warning(_LW("Volume %(name)s already presented (%(status)d)!"), + {'name': volume['name'], 'status': status}) + else: + LOG.error(_LE("Failed to present volume %(name)s (%(status)d)!"), + {'name': volume['name'], 'status': status}) + RaiseXIODriverException() + # Fetch LUN. In theory the LUN should be what caller requested. + # We try to use shortcut as location comes back in Location header. + # Make sure shortcut of using location header worked, if not ask + # for it explicitly. + location = resp['location'] + if location == '': + location = self._alloc_location(volume, hostname) + # Find target LUN + if location != '': + target_lun = self._find_target_lun(location) + # Success. Return target LUN. + LOG.debug("Volume %(volume)s presented: %(host)s %(lun)s", + {'volume': volume['name'], 'host': hostname, + 'lun': target_lun}) + return target_lun + + def find_allocations(self, hostname): + """Find allocations for specified host""" + alloc_cnt = 0 + url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, {'hostname': hostname}) + status = resp['status'] + if status != 200: + LOG.error(_LE("Failed to get allocation information: " + "%(host)s (%(status)d)!"), + {'host': hostname, 'status': status}) + RaiseXIODriverException() + # Good response. Count the number of allocations. + allocation_tree = etree.fromstring(resp['content']) + for allocation in allocation_tree.iter(): + if allocation.tag != 'allocation': + continue + alloc_cnt += 1 + return alloc_cnt + + def _find_host(self, endpoints): + """Check if host entry exists on ISE based on endpoint (IQN, WWNs)""" + # FC host might have more than one endpoint. ISCSI has only one. 
+ # Check if endpoints is a list, if so use first entry in list for + # host search. + if type(endpoints) is list: + for endpoint in endpoints: + ep = endpoint + break + else: + ep = endpoints + # Got single end point. Now make REST API call to fetch all hosts + LOG.debug("find_host: Looking for host %s.", ep) + host = {} + host['name'] = '' + host['type'] = '' + host['locator'] = '' + params = {} + url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid()) + resp = self._send_cmd('GET', url, params) + status = resp['status'] + if resp['status'] != 200: + LOG.error(_LE("Could not find any hosts (%s)"), status) + RaiseXIODriverException() + # Good response. Try to match up a host based on end point string. + host_tree = etree.fromstring(resp['content']) + for host_node in host_tree.iter(): + if host_node.tag != 'host': + continue + # Found a host tag. Check if end point matches. + endpoints_node = host_node.find('endpoints') + if endpoints_node is None: + continue + for endpoint_node in endpoints_node.iter(): + if endpoint_node.tag != 'endpoint': + continue + gid = endpoint_node.find('globalid') + if gid is None: + continue + if string.upper(gid.text) != string.upper(ep): + continue + # We have a match. Fill in host name, type and locator + host['locator'] = host_node.attrib['self'] + type_tag = host_node.find('type') + if type_tag is not None: + host['type'] = type_tag.text + name_tag = host_node.find('name') + if name_tag is not None: + host['name'] = name_tag.text + break + # This will be filled in or '' based on findings above. + return host + + def _create_host(self, hostname, endpoints): + """Create host entry on ISE for connector""" + # Create endpoint list for REST call. 
+ endpoint_str = '' + if type(endpoints) is list: + ep_str = [] + ec = 0 + for endpoint in endpoints: + if ec == 0: + ep_str.append("%s" % (endpoint)) + else: + ep_str.append("endpoint=%s" % (endpoint)) + ec += 1 + endpoint_str = '&'.join(ep_str) + else: + endpoint_str = endpoints + # Log host creation. + LOG.debug("Create host %(host)s; %(endpoint)s", + {'host': hostname, 'endpoint': endpoint_str}) + # Issue REST call to create host entry of Openstack type. + params = {} + params = {'name': hostname, 'endpoint': endpoint_str, + 'os': 'openstack'} + url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid()) + resp = self._send_cmd('POST', url, params) + status = resp['status'] + if status != 201 and status != 409: + LOG.error(_LE("POST for host create failed (%s)!"), status) + RaiseXIODriverException() + # Successfully created host entry. Return host name. + return hostname + + def _create_clone(self, volume, clone, clone_type): + """Create clone worker function""" + # This function is called for both snapshot and clone + # clone_type specifies what type is being processed + # Creating snapshots and clones is a two step process on current ISE + # FW. First snapshot/clone is prepared and then created. + volume_name = '' + if clone_type == 'snapshot': + volume_name = volume['volume_name'] + elif clone_type == 'clone': + volume_name = volume['name'] + args = {} + # Make sure source volume is ready. This is another case where + # we have to work around asynchronous behavior in ISE REST API. 
+ args['name'] = volume_name + args['status_string'] = OPERATIONAL_STATUS + retries = self.configuration.ise_completion_retries + vol_info = self._wait_for_completion(self._help_wait_for_status, + args, retries) + if vol_info['value'] == '0': + LOG.debug('Source volume %s ready.', volume_name) + else: + LOG.error(_LE("Source volume %s not ready!"), volume_name) + RaiseXIODriverException() + # Prepare snapshot + # get extra_specs and qos specs from source volume + # these functions fill in default values for entries used below + ctxt = context.get_admin_context() + type_id = volume['volume_type_id'] + extra_specs = self._get_extra_specs(ctxt, type_id) + LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s", + {'volume_name': volume['name'], 'extra_specs': extra_specs}) + qos = self._get_qos_specs(ctxt, type_id) + # Wait until snapshot/clone is prepared. + args['method'] = 'POST' + args['url'] = vol_info['location'] + args['status'] = 202 + args['arglist'] = {'name': clone['name'], + 'type': clone_type, + 'affinity': extra_specs['affinity'], + 'IOPSmin': qos['minIOPS'], + 'IOPSmax': qos['maxIOPS'], + 'IOPSburst': qos['burstIOPS']} + retries = self.configuration.ise_completion_retries + resp = self._wait_for_completion(self._help_call_method, + args, retries) + if resp['status'] != 202: + # clone prepare failed - bummer + LOG.error(_LE("Prepare clone failed for %s."), clone['name']) + RaiseXIODriverException() + # clone prepare request accepted + # make sure not to continue until clone prepared + args['name'] = clone['name'] + args['status_string'] = PREPARED_STATUS + retries = self.configuration.ise_completion_retries + clone_info = self._wait_for_completion(self._help_wait_for_status, + args, retries) + if PREPARED_STATUS in clone_info['details']: + LOG.debug('Clone %s prepared.', clone['name']) + else: + LOG.error(_LE("Clone %s not in prepared state!"), clone['name']) + RaiseXIODriverException() + # Clone prepared, now commit the create + resp = 
self._send_cmd('PUT', clone_info['location'], + {clone_type: 'true'}) + if resp['status'] != 201: + LOG.error(_LE("Commit clone failed: %(name)s (%(status)d)!"), + {'name': clone['name'], 'status': resp['status']}) + RaiseXIODriverException() + # Clone create request accepted. Make sure not to return until clone + # operational. + args['name'] = clone['name'] + args['status_string'] = OPERATIONAL_STATUS + retries = self.configuration.ise_completion_retries + clone_info = self._wait_for_completion(self._help_wait_for_status, + args, retries) + if OPERATIONAL_STATUS in clone_info['string']: + LOG.info(_LI("Clone %s created."), clone['name']) + else: + LOG.error(_LE("Commit failed for %s!"), clone['name']) + RaiseXIODriverException() + return + + def _fill_in_available_capacity(self, node, pool): + """Fill in free capacity info for pool.""" + available = node.find('available') + if available is None: + pool['free_capacity_gb'] = 0 + return pool + pool['free_capacity_gb'] = int(available.get('total')) + # Fill in separate RAID level cap + byred = available.find('byredundancy') + if byred is None: + return pool + raid = byred.find('raid-0') + if raid is not None: + pool['free_capacity_gb_raid_0'] = int(raid.text) + raid = byred.find('raid-1') + if raid is not None: + pool['free_capacity_gb_raid_1'] = int(raid.text) + raid = byred.find('raid-5') + if raid is not None: + pool['free_capacity_gb_raid_5'] = int(raid.text) + raid = byred.find('raid-6') + if raid is not None: + pool['free_capacity_gb_raid_6'] = int(raid.text) + return pool + + def _fill_in_used_capacity(self, node, pool): + """Fill in used capacity info for pool.""" + used = node.find('used') + if used is None: + pool['allocated_capacity_gb'] = 0 + return pool + pool['allocated_capacity_gb'] = int(used.get('total')) + # Fill in separate RAID level cap + byred = used.find('byredundancy') + if byred is None: + return pool + raid = byred.find('raid-0') + if raid is not None: + pool['allocated_capacity_gb_raid_0'] 
= int(raid.text) + raid = byred.find('raid-1') + if raid is not None: + pool['allocated_capacity_gb_raid_1'] = int(raid.text) + raid = byred.find('raid-5') + if raid is not None: + pool['allocated_capacity_gb_raid_5'] = int(raid.text) + raid = byred.find('raid-6') + if raid is not None: + pool['allocated_capacity_gb_raid_6'] = int(raid.text) + return pool + + def _get_pools(self): + """Return information about all pools on ISE""" + pools = [] + pool = {} + vol_cnt = 0 + url = '/storage/pools' + resp = self._send_cmd('GET', url, {}) + status = resp['status'] + if status != 200: + # Request failed. Return what we have, which isn't much. + LOG.warning(_LW("Could not get pool information (%s)!"), status) + return (pools, vol_cnt) + # Parse out available (free) and used. Add them up to get total. + xml_tree = etree.fromstring(resp['content']) + for child in xml_tree: + if child.tag != 'pool': + continue + # Fill in ise pool name + tag = child.find('name') + if tag is not None: + pool['pool_ise_name'] = tag.text + # Fill in globalid + tag = child.find('globalid') + if tag is not None: + pool['globalid'] = tag.text + # Fill in pool name + tag = child.find('id') + if tag is not None: + pool['pool_name'] = tag.text + # Fill in pool status + tag = child.find('status') + if tag is not None: + pool['status'] = tag.attrib['string'] + details = tag.find('details') + if details is not None: + detail = details.find('detail') + if detail is not None: + pool['status_details'] = detail.text + # Fill in available capacity + pool = self._fill_in_available_capacity(child, pool) + # Fill in allocated capacity + pool = self._fill_in_used_capacity(child, pool) + # Fill in media health and type + media = child.find('media') + if media is not None: + medium = media.find('medium') + if medium is not None: + health = medium.find('health') + if health is not None: + pool['health'] = int(health.text) + tier = medium.find('tier') + if tier is not None: + pool['media'] = tier.attrib['string'] + 
cap = child.find('IOPSmincap') + if cap is not None: + pool['minIOPS_capacity'] = cap.text + cap = child.find('IOPSmaxcap') + if cap is not None: + pool['maxIOPS_capacity'] = cap.text + cap = child.find('IOPSburstcap') + if cap is not None: + pool['burstIOPS_capacity'] = cap.text + pool['total_capacity_gb'] = (int(pool['free_capacity_gb'] + + pool['allocated_capacity_gb'])) + pool['QoS_support'] = self.configuration.ise_qos + pool['reserved_percentage'] = 0 + pools.append(pool) + # count volumes + volumes = child.find('volumes') + if volumes is not None: + for volume in volumes: + vol_cnt += 1 + return (pools, vol_cnt) + + def _update_volume_stats(self): + """Update storage information""" + self._send_query() + data = {} + data["vendor_name"] = 'X-IO' + data["driver_version"] = self._get_version() + if self.configuration.volume_backend_name: + backend_name = self.configuration.volume_backend_name + else: + backend_name = self.__class__.__name__ + data["volume_backend_name"] = backend_name + data['reserved_percentage'] = 0 + # Get total and free capacity. 
+ (pools, vol_cnt) = self._get_pools() + total_cap = 0 + free_cap = 0 + # fill in global capability support + # capacity + for pool in pools: + total_cap += int(pool['total_capacity_gb']) + free_cap += int(pool['free_capacity_gb']) + data['total_capacity_gb'] = int(total_cap) + data['free_capacity_gb'] = int(free_cap) + # QoS + data['QoS_support'] = self.configuration.ise_qos + # Volume affinity + data['affinity'] = self.configuration.ise_affinity + # Thin provisioning + data['thin'] = self.configuration.san_thin_provision + data['pools'] = pools + data['active_volumes'] = int(vol_cnt) + return data + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + if refresh: + self._vol_stats = self._update_volume_stats() + LOG.debug("ISE get_volume_stats (total, free): %(total)s, %(free)s", + {'total': self._vol_stats['total_capacity_gb'], + 'free': self._vol_stats['free_capacity_gb']}) + return self._vol_stats + + def _get_extra_specs(self, ctxt, type_id): + """Get extra specs from volume type.""" + specs = {} + specs['affinity'] = '' + specs['alloctype'] = '' + specs['pool'] = self.configuration.ise_storage_pool + specs['raid'] = self.configuration.ise_raid + if type_id is not None: + volume_type = volume_types.get_volume_type(ctxt, type_id) + extra_specs = volume_type.get('extra_specs') + # Parse out RAID, pool and affinity values + for key, value in extra_specs.items(): + subkey = '' + if ':' in key: + fields = key.split(':') + key = fields[0] + subkey = fields[1] + if string.upper(key) == string.upper('Feature'): + if string.upper(subkey) == string.upper('Raid'): + specs['raid'] = value + elif string.upper(subkey) == string.upper('Pool'): + specs['pool'] = value + elif string.upper(key) == string.upper('Affinity'): + # Only fill this in if ISE FW supports volume affinity + if self.configuration.ise_affinity: + if string.upper(subkey) == string.upper('Type'): + specs['affinity'] = value + elif string.upper(key) == string.upper('Alloc'): + # Only fill 
this in if ISE FW supports thin provisioning + if self.configuration.san_thin_provision: + if string.upper(subkey) == string.upper('Type'): + specs['alloctype'] = value + return specs + + def _get_qos_specs(self, ctxt, type_id): + """Get QoS specs from volume type.""" + specs = {} + specs['minIOPS'] = '' + specs['maxIOPS'] = '' + specs['burstIOPS'] = '' + if type_id is not None: + volume_type = volume_types.get_volume_type(ctxt, type_id) + qos_specs_id = volume_type.get('qos_specs_id') + if qos_specs_id is not None: + kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] + else: + kvs = volume_type.get('extra_specs') + # Parse out min, max and burst values + for key, value in kvs.items(): + if ':' in key: + fields = key.split(':') + key = fields[1] + if string.upper(key) == string.upper('minIOPS'): + specs['minIOPS'] = value + elif string.upper(key) == string.upper('maxIOPS'): + specs['maxIOPS'] = value + elif string.upper(key) == string.upper('burstIOPS'): + specs['burstIOPS'] = value + return specs + + def create_volume(self, volume): + """Create requested volume""" + LOG.debug("X-IO create_volume called.") + # get extra_specs and qos based on volume type + # these functions fill in default values for entries used below + ctxt = context.get_admin_context() + type_id = volume['volume_type_id'] + extra_specs = self._get_extra_specs(ctxt, type_id) + LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s", + {'volume_name': volume['name'], 'extra_specs': extra_specs}) + qos = self._get_qos_specs(ctxt, type_id) + # Make create call + url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid()) + resp = self._send_cmd('POST', url, + {'name': volume['name'], + 'size': volume['size'], + 'pool': extra_specs['pool'], + 'redundancy': extra_specs['raid'], + 'affinity': extra_specs['affinity'], + 'alloctype': extra_specs['alloctype'], + 'IOPSmin': qos['minIOPS'], + 'IOPSmax': qos['maxIOPS'], + 'IOPSburst': qos['burstIOPS']}) + if resp['status'] != 201: + 
LOG.error(_LE("Failed to create volume: %(name)s (%(status)s)"), + {'name': volume['name'], 'status': resp['status']}) + RaiseXIODriverException() + # Good response. Make sure volume is in operational state before + # returning. Volume creation completes asynchronously. + args = {} + args['name'] = volume['name'] + args['status_string'] = OPERATIONAL_STATUS + retries = self.configuration.ise_completion_retries + vol_info = self._wait_for_completion(self._help_wait_for_status, + args, retries) + if OPERATIONAL_STATUS in vol_info['string']: + # Ready. + LOG.info(_LI("Volume %s created"), volume['name']) + else: + LOG.error(_LE("Failed to create volume %s."), volume['name']) + RaiseXIODriverException() + return + + def create_cloned_volume(self, volume, src_vref): + """Create clone""" + LOG.debug("X-IO create_cloned_volume called.") + self._create_clone(src_vref, volume, 'clone') + + def create_snapshot(self, snapshot): + """Create snapshot""" + LOG.debug("X-IO create_snapshot called.") + # Creating a snapshot uses same interface as clone operation on + # ISE. Clone type ('snapshot' or 'clone') tells the ISE what kind + # of operation is requested. + self._create_clone(snapshot, snapshot, 'snapshot') + + def create_volume_from_snapshot(self, volume, snapshot): + """Create volume from snapshot""" + LOG.debug("X-IO create_volume_from_snapshot called.") + # ISE snapshots are just like a volume so this is a clone operation. + self._create_clone(snapshot, volume, 'clone') + + def _delete_volume(self, volume): + """Delete specified volume""" + LOG.debug("X-IO delete_volume called.") + # First unpresent volume from all hosts. + self._alloc_location(volume, '', 1) + # Get volume status. Location string for volume comes back + # in response. Used for DELETE call below. + vol_info = self._get_volume_info(volume['name']) + if vol_info['location'] == '': + LOG.warning(_LW("Delete volume: %s not found!"), volume['name']) + return + # Make DELETE call. 
+ args = {} + args['method'] = 'DELETE' + args['url'] = vol_info['location'] + args['arglist'] = {} + args['status'] = 204 + retries = self.configuration.ise_completion_retries + resp = self._wait_for_completion(self._help_call_method, args, retries) + if resp['status'] == 204: + LOG.info(_LI("Volume %s deleted."), volume['name']) + return + + def delete_volume(self, volume): + """Delete specified volume""" + LOG.debug("X-IO delete_volume called.") + self._delete_volume(volume) + + def delete_snapshot(self, snapshot): + """Delete snapshot""" + LOG.debug("X-IO delete_snapshot called.") + # Delete snapshot and delete volume is identical to ISE. + self._delete_volume(snapshot) + + def _modify_volume(self, volume, new_attributes): + # Get volume status. Location string for volume comes back + # in response. Used for PUT call below. + vol_info = self._get_volume_info(volume['name']) + if vol_info['location'] == '': + LOG.error(_LE("modify volume: %s does not exist!"), volume['name']) + RaiseXIODriverException() + # Make modify volume REST call using PUT. + # Location from above is used as identifier. 
+ resp = self._send_cmd('PUT', vol_info['location'], new_attributes) + status = resp['status'] + if status == 201: + LOG.debug("Volume %s modified.", volume['name']) + return True + LOG.error(_LE("Modify volume PUT failed: %(name)s (%(status)d)."), + {'name': volume['name'], 'status': status}) + RaiseXIODriverException() + + def extend_volume(self, volume, new_size): + """Extend volume to new size.""" + LOG.debug("extend_volume called") + ret = self._modify_volume(volume, {'size': new_size}) + if ret is True: + LOG.info(_LI("volume %(name)s extended to %(size)d."), + {'name': volume['name'], 'size': new_size}) + return + + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type.""" + LOG.debug("X-IO retype called") + qos = self._get_qos_specs(ctxt, new_type['id']) + ret = self._modify_volume(volume, {'IOPSmin': qos['minIOPS'], + 'IOPSmax': qos['maxIOPS'], + 'IOPSburst': qos['burstIOPS']}) + if ret is True: + LOG.info(_LI("Volume %s retyped."), volume['name']) + return True + + def manage_existing(self, volume, ise_volume_ref): + """Convert an existing ISE volume to a Cinder volume.""" + LOG.debug("X-IO manage_existing called") + if 'source-name' not in ise_volume_ref: + LOG.error(_LE("manage_existing: No source-name in ref!")) + RaiseXIODriverException() + # copy the source-name to 'name' for modify volume use + ise_volume_ref['name'] = ise_volume_ref['source-name'] + ctxt = context.get_admin_context() + qos = self._get_qos_specs(ctxt, volume['volume_type_id']) + ret = self._modify_volume(ise_volume_ref, + {'name': volume['name'], + 'IOPSmin': qos['minIOPS'], + 'IOPSmax': qos['maxIOPS'], + 'IOPSburst': qos['burstIOPS']}) + if ret is True: + LOG.info(_LI("Volume %s converted."), ise_volume_ref['name']) + return ret + + def manage_existing_get_size(self, volume, ise_volume_ref): + """Get size of an existing ISE volume.""" + LOG.debug("X-IO manage_existing_get_size called") + if 'source-name' not in ise_volume_ref: + 
LOG.error(_LE("manage_existing_get_size: No source-name in ref!")) + RaiseXIODriverException() + ref_name = ise_volume_ref['source-name'] + # get volume status including size + vol_info = self._get_volume_info(ref_name) + if vol_info['location'] == '': + LOG.error(_LE("manage_existing_get_size: %s does not exist!"), + ref_name) + RaiseXIODriverException() + return int(vol_info['size']) + + def unmanage(self, volume): + """Remove Cinder management from ISE volume""" + LOG.debug("X-IO unmanage called") + vol_info = self._get_volume_info(volume['name']) + if vol_info['location'] == '': + LOG.error(_LE("unmanage: Volume %s does not exist!"), + volume['name']) + RaiseXIODriverException() + # This is a noop. ISE does not store any Cinder specific information. + + def ise_present(self, volume, hostname_in, endpoints): + """Set up presentation for volume and specified connector""" + LOG.debug("X-IO ise_present called.") + # Create host entry on ISE if necessary. + # Check to see if host entry already exists. + # Create if not found + host = self._find_host(endpoints) + if host['name'] == '': + # host not found, so create new host entry + # Use host name if filled in. If blank, ISE will make up a name. + self._create_host(hostname_in, endpoints) + host = self._find_host(endpoints) + if host['name'] == '': + # host still not found, this is fatal. + LOG.error(_LE("Host could not be found!")) + RaiseXIODriverException() + elif string.upper(host['type']) != 'OPENSTACK': + # Make sure host type is marked as Openstack host + params = {'os': 'openstack'} + resp = self._send_cmd('PUT', host['locator'], params) + status = resp['status'] + if status != 201 and status != 409: + LOG.error(_LE("Host PUT failed (%s)."), status) + RaiseXIODriverException() + # We have a host object. + target_lun = '' + # Present volume to host. + target_lun = self._present_volume(volume, host['name'], target_lun) + # Fill in target information. 
+ data = {} + data['target_lun'] = target_lun + data['volume_id'] = volume['id'] + data['access_mode'] = 'rw' + return data + + def ise_unpresent(self, volume, endpoints): + """Delete presentation between volume and connector""" + LOG.debug("X-IO ise_unpresent called.") + # Delete allocation uses host name. Go find it based on endpoints. + host = self._find_host(endpoints) + if host['name'] != '': + # Delete allocation based on hostname and volume. + self._alloc_location(volume, host['name'], 1) + return host['name'] + + def create_export(self, context, volume): + LOG.debug("X-IO create_export called.") + + def ensure_export(self, context, volume): + LOG.debug("X-IO ensure_export called.") + + def remove_export(self, context, volume): + LOG.debug("X-IO remove_export called.") + + def local_path(self, volume): + LOG.debug("X-IO local_path called.") + + def delete_host(self, endpoints): + """Delete ISE host object""" + host = self._find_host(endpoints) + if host['locator'] != '': + # Delete host + self._send_cmd('DELETE', host['locator'], {}) + LOG.debug("X-IO: host %s deleted", host['name']) + + +# Protocol specific classes for entry. They are wrappers around base class +# above and every external API results in a call to common function in base +# class. +class XIOISEISCSIDriver(driver.ISCSIDriver): + + """Requires ISE Running FW version 3.1.0 or higher""" + + def __init__(self, *args, **kwargs): + super(XIOISEISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(XIO_OPTS) + self.configuration.append_config_values(san.san_opts) + + # The iscsi_ip_address must always be set. 
+ if self.configuration.iscsi_ip_address == '': + LOG.error(_LE("iscsi_ip_address must be set!")) + RaiseXIODriverException() + # Setup common driver + self.driver = XIOISEDriver(configuration=self.configuration) + + def do_setup(self, context): + return self.driver.do_setup(context) + + def check_for_setup_error(self): + return self.driver.check_for_setup_error() + + def local_path(self, volume): + return self.driver.local_path(volume) + + def get_volume_stats(self, refresh=False): + data = self.driver.get_volume_stats(refresh) + data["storage_protocol"] = 'iSCSI' + return data + + def create_volume(self, volume): + self.driver.create_volume(volume) + # Volume created successfully. Fill in CHAP information. + model_update = {} + chap = {} + chap = self.driver.find_target_chap() + if chap['chap_user'] != '': + model_update['provider_auth'] = 'CHAP %s %s' % \ + (chap['chap_user'], chap['chap_passwd']) + else: + model_update['provider_auth'] = '' + return model_update + + def create_cloned_volume(self, volume, src_vref): + return self.driver.create_cloned_volume(volume, src_vref) + + def create_volume_from_snapshot(self, volume, snapshot): + return self.driver.create_volume_from_snapshot(volume, snapshot) + + def delete_volume(self, volume): + return self.driver.delete_volume(volume) + + def extend_volume(self, volume, new_size): + return self.driver.extend_volume(volume, new_size) + + def retype(self, ctxt, volume, new_type, diff, host): + return self.driver.retype(ctxt, volume, new_type, diff, host) + + def manage_existing(self, volume, ise_volume_ref): + ret = self.driver.manage_existing(volume, ise_volume_ref) + if ret is True: + # Volume converted successfully. Fill in CHAP information. 
+ model_update = {} + chap = {} + chap = self.driver.find_target_chap() + if chap['chap_user'] != '': + model_update['provider_auth'] = 'CHAP %s %s' % \ + (chap['chap_user'], chap['chap_passwd']) + else: + model_update['provider_auth'] = '' + return model_update + + def manage_existing_get_size(self, volume, ise_volume_ref): + return self.driver.manage_existing_get_size(volume, ise_volume_ref) + + def unmanage(self, volume): + return self.driver.unmanage(volume) + + def initialize_connection(self, volume, connector): + hostname = '' + if 'host' in connector: + hostname = connector['host'] + data = self.driver.ise_present(volume, hostname, + connector['initiator']) + # find IP for target + data['target_portal'] = \ + '%s:3260' % (self.configuration.iscsi_ip_address) + # set IQN for target + data['target_discovered'] = False + data['target_iqn'] = \ + self.driver.find_target_iqn(self.configuration.iscsi_ip_address) + # Fill in authentication method (CHAP) + if 'provider_auth' in volume: + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + data['auth_method'] = auth_method + data['auth_username'] = auth_username + data['auth_password'] = auth_secret + return {'driver_volume_type': 'iscsi', + 'data': data} + + def terminate_connection(self, volume, connector, **kwargs): + hostname = self.driver.ise_unpresent(volume, connector['initiator']) + alloc_cnt = 0 + if hostname != '': + alloc_cnt = self.driver.find_allocations(hostname) + if alloc_cnt == 0: + # delete host object + self.driver.delete_host(connector['initiator']) + + def create_snapshot(self, snapshot): + return self.driver.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + return self.driver.delete_snapshot(snapshot) + + def create_export(self, context, volume): + return self.driver.create_export(context, volume) + + def ensure_export(self, context, volume): + return self.driver.ensure_export(context, volume) + + def remove_export(self, 
context, volume): + return self.driver.remove_export(context, volume) + + +class XIOISEFCDriver(driver.FibreChannelDriver): + + """Requires ISE Running FW version 2.8.0 or higher""" + + def __init__(self, *args, **kwargs): + super(XIOISEFCDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(XIO_OPTS) + self.configuration.append_config_values(san.san_opts) + self.driver = XIOISEDriver(configuration=self.configuration) + + def do_setup(self, context): + return self.driver.do_setup(context) + + def check_for_setup_error(self): + return self.driver.check_for_setup_error() + + def local_path(self, volume): + return self.driver.local_path(volume) + + def get_volume_stats(self, refresh=False): + data = self.driver.get_volume_stats(refresh) + data["storage_protocol"] = 'fibre_channel' + return data + + def create_volume(self, volume): + return self.driver.create_volume(volume) + + def create_cloned_volume(self, volume, src_vref): + return self.driver.create_cloned_volume(volume, src_vref) + + def create_volume_from_snapshot(self, volume, snapshot): + return self.driver.create_volume_from_snapshot(volume, snapshot) + + def delete_volume(self, volume): + return self.driver.delete_volume(volume) + + def extend_volume(self, volume, new_size): + return self.driver.extend_volume(volume, new_size) + + def retype(self, ctxt, volume, new_type, diff, host): + return self.driver.retype(ctxt, volume, new_type, diff, host) + + def manage_existing(self, volume, ise_volume_ref): + return self.driver.manage_existing(volume, ise_volume_ref) + + def manage_existing_get_size(self, volume, ise_volume_ref): + return self.driver.manage_existing_get_size(volume, ise_volume_ref) + + def unmanage(self, volume): + return self.driver.unmanage(volume) + + @fczm_utils.AddFCZone + def initialize_connection(self, volume, connector): + hostname = '' + if 'host' in connector: + hostname = connector['host'] + data = self.driver.ise_present(volume, hostname, connector['wwpns']) 
+ data['target_discovered'] = True + # set wwns for target + target_wwns = self.driver.find_target_wwns() + data['target_wwn'] = target_wwns + # build target initiator map + target_map = {} + for initiator in connector['wwpns']: + target_map[initiator] = target_wwns + data['initiator_target_map'] = target_map + return {'driver_volume_type': 'fibre_channel', + 'data': data} + + @fczm_utils.RemoveFCZone + def terminate_connection(self, volume, connector, **kwargs): + # now we are ready to tell ISE to delete presentations + hostname = self.driver.ise_unpresent(volume, connector['wwpns']) + # set target_wwn and initiator_target_map only if host + # has no more presentations + data = {} + alloc_cnt = 0 + if hostname != '': + alloc_cnt = self.driver.find_allocations(hostname) + if alloc_cnt == 0: + target_wwns = self.driver.find_target_wwns() + data['target_wwn'] = target_wwns + # build target initiator map + target_map = {} + for initiator in connector['wwpns']: + target_map[initiator] = target_wwns + data['initiator_target_map'] = target_map + # delete host object + self.driver.delete_host(connector['wwpns']) + + return {'driver_volume_type': 'fibre_channel', + 'data': data} + + def create_snapshot(self, snapshot): + return self.driver.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + return self.driver.delete_snapshot(snapshot) + + def create_export(self, context, volume): + return self.driver.create_export(context, volume) + + def ensure_export(self, context, volume): + return self.driver.ensure_export(context, volume) + + def remove_export(self, context, volume): + return self.driver.remove_export(context, volume) -- 2.45.2