From e188dc1c80c2213c7a580021006f042140c30f3b Mon Sep 17 00:00:00 2001 From: Rick Chen Date: Mon, 11 Aug 2014 10:55:28 +0800 Subject: [PATCH] Add ProphetStor DPL Storage server volume driver for Cinder ProphetStor DPL Storage server enables x86 commodity hardware as enterprise-grade storage systems. *[2014/07/14] Remove roll-back function. *[2014/07/16] Add decorate fiber zone manage utils in initialize_connection and terminate_connection. Use mock instead of mox in tests. *[2014/07/21] Update cinder volume certification report. *[2014/07/29] Openstack continuous integration platform test. Retry: 27 *[2014/07/31] Rebase *[2014/08/06] Refine code *[2014/08/11] Rebase and enhance to support thin/thick volume Implements: blueprint prophetstor-dpl-driver cinder-cert-results: https://bugs.launchpad.net/cinder/+bug/1354066 Change-Id: Iced5e45362aef4286bb7f1c848ab7cb3573b5c02 --- cinder/tests/test_prophetstor_dpl.py | 538 +++++++++ cinder/volume/drivers/prophetstor/__init__.py | 0 cinder/volume/drivers/prophetstor/dpl_fc.py | 421 +++++++ .../volume/drivers/prophetstor/dpl_iscsi.py | 149 +++ .../volume/drivers/prophetstor/dplcommon.py | 1061 +++++++++++++++++ cinder/volume/drivers/prophetstor/options.py | 30 + etc/cinder/cinder.conf.sample | 12 + 7 files changed, 2211 insertions(+) create mode 100644 cinder/tests/test_prophetstor_dpl.py create mode 100644 cinder/volume/drivers/prophetstor/__init__.py create mode 100644 cinder/volume/drivers/prophetstor/dpl_fc.py create mode 100644 cinder/volume/drivers/prophetstor/dpl_iscsi.py create mode 100644 cinder/volume/drivers/prophetstor/dplcommon.py create mode 100644 cinder/volume/drivers/prophetstor/options.py diff --git a/cinder/tests/test_prophetstor_dpl.py b/cinder/tests/test_prophetstor_dpl.py new file mode 100644 index 000000000..85d422f2c --- /dev/null +++ b/cinder/tests/test_prophetstor_dpl.py @@ -0,0 +1,538 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib + +import mock + +from cinder.openstack.common import units +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER +from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON + +POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4' +VOLUMEUUID = 'a000000000000000000000000000001' +INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa' +DATA_IN_VOLUME = {'id': VOLUMEUUID} +DATA_IN_CONNECTOR = {'initiator': INITIATOR} +## dpl.getpool +DATA_SERVER_INFO = 0, { + 'metadata': {'vendor': 'ProphetStor', + 'version': '1.5'}} + +DATA_POOLINFO = 0, { + 'capabilitiesURI': '', + 'children': [], + 'childrenrange': '', + 'completionStatus': 'Complete', + 'metadata': {'available_capacity': 4194074624, + 'ctime': 1390551362349, + 'vendor': 'prophetstor', + 'version': '1.5', + 'display_description': 'Default Pool', + 'display_name': 'default_pool', + 'event_uuid': '4f7c4d679a664857afa4d51f282a516a', + 'physical_device': {'cache': [], + 'data': ['disk_uuid_0', + 'disk_uuid_1', + 'disk_uuid_2'], + 'log': [], + 'spare': []}, + 'pool_uuid': POOLUUID, + 'properties': {'raid_level': 'raid0'}, + 'state': 'Online', + 'total_capacity': 4194828288, + 'zpool_guid': '8173612007304181810'}, + 'objectType': 'application/cdmi-container', + 'percentComplete': 100} + +## dpl.assignvdev +DATA_ASSIGNVDEV = 0, { + 'children': [], + 'childrenrange': '', + 
'completionStatus': 'Complete', + 'domainURI': '', + 'exports': {'Network/iSCSI': [ + {'logical_unit_name': '', + 'logical_unit_number': '101', + 'permissions': [INITIATOR], + 'portals': ['172.31.1.210:3260'], + 'target_identifier': + 'iqn.2013-09.com.prophetstor:hypervisor.886423051816' + }]}, + 'metadata': {'ctime': 0, + 'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5', + 'type': 'volume'}, + 'objectID': '', + 'objectName': 'd827e23d403f4f12bb208a6fec208fd8', + 'objectType': 'application/cdmi-container', + 'parentID': '8daa374670af447e8efea27e16bf84cd', + 'parentURI': '/dpl_volume', + 'snapshots': [] +} + +DATA_OUTPUT = 0, None + +DATA_IN_VOLUME = {'id': 'abc123', + 'display_name': 'abc123', + 'display_description': '', + 'size': 1} + +DATA_IN_VOLUME1 = {'id': 'abc456', + 'display_name': 'abc456', + 'display_description': '', + 'size': 1} + +DATA_IN_SNAPSHOT = {'id': 'snapshot1', + 'volume_id': 'abc123', + 'display_name': 'snapshot1', + 'display_description': ''} + + +class TestProphetStorDPLVolume(test.TestCase): + + def _gen_snapshot_url(self, vdevid, snapshotid): + snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT, + snapshotid) + return snapshot_url + + def setUp(self): + super(TestProphetStorDPLVolume, self).setUp() + self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password') + self.DPL_MOCK = mock.MagicMock() + self.dplcmd.objCmd = self.DPL_MOCK + self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT + + def test_getserverinfo(self): + self.dplcmd.get_server_info() + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM), + None, + [httplib.OK, httplib.ACCEPTED]) + + def test_createvdev(self): + self.dplcmd.create_vdev(DATA_IN_VOLUME['id'], + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + POOLUUID, + int(DATA_IN_VOLUME['size']) * units.Gi) + + metadata = {} + metadata['display_name'] = DATA_IN_VOLUME['display_name'] + 
metadata['display_description'] = DATA_IN_VOLUME['display_description'] + metadata['pool_uuid'] = POOLUUID + metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi + metadata['maximum_snapshot'] = 1024 + metadata['properties'] = dict(thin_provision=True) + params = {} + params['metadata'] = metadata + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def test_extendvdev(self): + self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'], + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + int(DATA_IN_VOLUME['size']) * units.Gi) + metadata = {} + metadata['display_name'] = DATA_IN_VOLUME['display_name'] + metadata['display_description'] = DATA_IN_VOLUME['display_description'] + metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi + metadata['maximum_snapshot'] = 1024 + params = {} + params['metadata'] = metadata + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def test_deletevdev(self): + self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True) + metadata = {} + params = {} + metadata['force'] = True + params['metadata'] = metadata + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'DELETE', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND, + httplib.NO_CONTENT]) + + def test_createvdevfromsnapshot(self): + self.dplcmd.create_vdev_from_snapshot( + DATA_IN_VOLUME['id'], + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + DATA_IN_SNAPSHOT['id'], + POOLUUID) + metadata = {} + params = {} + metadata['snapshot_operation'] = 'copy' + metadata['display_name'] = 
DATA_IN_VOLUME['display_name'] + metadata['display_description'] = DATA_IN_VOLUME['display_description'] + metadata['pool_uuid'] = POOLUUID + metadata['maximum_snapshot'] = 1024 + metadata['properties'] = dict(thin_provision=True) + params['metadata'] = metadata + params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def test_getpool(self): + self.dplcmd.get_pool(POOLUUID) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, + POOLUUID), + None, + [httplib.OK, httplib.ACCEPTED]) + + def test_clonevdev(self): + self.dplcmd.clone_vdev( + DATA_IN_VOLUME['id'], + DATA_IN_VOLUME1['id'], + POOLUUID, + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + int(DATA_IN_VOLUME['size']) * units.Gi + ) + metadata = {} + params = {} + metadata["snapshot_operation"] = "clone" + metadata["display_name"] = DATA_IN_VOLUME['display_name'] + metadata["display_description"] = DATA_IN_VOLUME['display_description'] + metadata["pool_uuid"] = POOLUUID + metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi + metadata['maximum_snapshot'] = 1024 + metadata['properties'] = dict(thin_provision=True) + params["metadata"] = metadata + params["copy"] = DATA_IN_VOLUME['id'] + + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME1['id']), + params, + [httplib.OK, httplib.CREATED, httplib.ACCEPTED]) + + def test_createvdevsnapshot(self): + self.dplcmd.create_vdev_snapshot( + DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id'], + DATA_IN_SNAPSHOT['display_name'], + DATA_IN_SNAPSHOT['display_description'] + ) + metadata = {} + params = {} + metadata['display_name'] = 
DATA_IN_SNAPSHOT['display_name'] + metadata['display_description'] = \ + DATA_IN_SNAPSHOT['display_description'] + params['metadata'] = metadata + params['snapshot'] = DATA_IN_SNAPSHOT['id'] + + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.CREATED, httplib.ACCEPTED]) + + def test_getvdev(self): + self.dplcmd.get_vdev(DATA_IN_VOLUME['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + None, + [httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND]) + + def test_getvdevstatus(self): + self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456') + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id'], + '123456'), + None, + [httplib.OK, httplib.NOT_FOUND]) + + def test_getpoolstatus(self): + self.dplcmd.get_pool_status(POOLUUID, '123456') + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_POOL, + POOLUUID, + '123456'), + None, + [httplib.OK, httplib.NOT_FOUND]) + + def test_assignvdev(self): + self.dplcmd.assign_vdev( + DATA_IN_VOLUME['id'], + 'iqn.1993-08.org.debian:01:test1', + '', + '1.1.1.1:3260', + 0 + ) + params = {} + metadata = {} + exports = {} + metadata['export_operation'] = 'assign' + exports['Network/iSCSI'] = {} + target_info = {} + target_info['logical_unit_number'] = 0 + target_info['logical_unit_name'] = '' + permissions = [] + portals = [] + portals.append('1.1.1.1:3260') + permissions.append('iqn.1993-08.org.debian:01:test1') + target_info['permissions'] = permissions + target_info['portals'] = portals + exports['Network/iSCSI'] = target_info + + params['metadata'] = metadata + params['exports'] = exports + 
self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def test_unassignvdev(self): + self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'], + 'iqn.1993-08.org.debian:01:test1', + '') + params = {} + metadata = {} + exports = {} + metadata['export_operation'] = 'unassign' + params['metadata'] = metadata + + exports['Network/iSCSI'] = {} + exports['Network/iSCSI']['target_identifier'] = '' + permissions = [] + permissions.append('iqn.1993-08.org.debian:01:test1') + exports['Network/iSCSI']['permissions'] = permissions + + params['exports'] = exports + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [httplib.OK, httplib.ACCEPTED, + httplib.NO_CONTENT, httplib.NOT_FOUND]) + + def test_deletevdevsnapshot(self): + self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id']) + params = {} + params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'DELETE', + '/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id'], + DPLCOMMON.DPL_OBJ_SNAPSHOT, + DATA_IN_SNAPSHOT['id']), + None, + [httplib.OK, httplib.ACCEPTED, httplib.NO_CONTENT, + httplib.NOT_FOUND]) + + def test_listvdevsnapshots(self): + self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id'], + DPLCOMMON.DPL_OBJ_SNAPSHOT), + None, + [httplib.OK]) + + +class TestProphetStorDPLDriver(test.TestCase): + + def __init__(self, method): + super(TestProphetStorDPLDriver, self).__init__(method) + + def _conver_uuid2hex(self, strID): + return strID.replace('-', '') + + def 
setUp(self): + super(TestProphetStorDPLDriver, self).setUp() + self.configuration = mock.Mock(conf.Configuration) + self.configuration.san_ip = '1.1.1.1' + self.configuration.dpl_port = 8356 + self.configuration.san_login = 'admin' + self.configuration.san_password = 'password' + self.configuration.dpl_pool = POOLUUID + self.configuration.iscsi_port = 3260 + self.configuration.san_is_local = False + self.configuration.san_thin_provision = True + self.context = '' + self.DPL_MOCK = mock.MagicMock() + self.dpldriver = DPLDRIVER.DPLISCSIDriver( + configuration=self.configuration) + self.dpldriver.dpl = self.DPL_MOCK + self.dpldriver.do_setup(self.context) + + def test_get_volume_stats(self): + self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO + self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO + res = self.dpldriver.get_volume_stats(True) + self.assertEqual(res['vendor_name'], 'ProphetStor') + self.assertEqual(res['driver_version'], '1.5') + self.assertEqual(res['total_capacity_gb'], 3.91) + self.assertEqual(res['free_capacity_gb'], 3.91) + self.assertEqual(res['reserved_percentage'], 0) + self.assertEqual(res['QoS_support'], False) + + def test_create_volume(self): + self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT + self.dpldriver.create_volume(DATA_IN_VOLUME) + self.DPL_MOCK\ + .create_vdev\ + .assert_called_once_with( + self._conver_uuid2hex(DATA_IN_VOLUME['id']), + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + self.configuration.dpl_pool, + int(DATA_IN_VOLUME['size']) * units.Gi, + True) + + def test_delete_volume(self): + self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT + self.dpldriver.delete_volume(DATA_IN_VOLUME) + self.DPL_MOCK\ + .delete_vdev\ + .assert_called_once_with(self + ._conver_uuid2hex(DATA_IN_VOLUME['id'])) + + def test_create_volume_from_snapshot(self): + self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT + self.dpldriver.create_volume_from_snapshot(DATA_IN_VOLUME, + 
DATA_IN_SNAPSHOT) + self.DPL_MOCK\ + .create_vdev_from_snapshot\ + .assert_called_once_with(self + ._conver_uuid2hex(DATA_IN_VOLUME['id']), + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + self + ._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), + self.configuration.dpl_pool, + True) + + def test_create_cloned_volume(self): + self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT + self.dpldriver.create_cloned_volume(DATA_IN_VOLUME1, DATA_IN_VOLUME) + self.DPL_MOCK\ + .clone_vdev\ + .assert_called_once_with(self + ._conver_uuid2hex(DATA_IN_VOLUME['id']), + self + ._conver_uuid2hex(DATA_IN_VOLUME1['id']), + self.configuration.dpl_pool, + DATA_IN_VOLUME1['display_name'], + DATA_IN_VOLUME1['display_description'], + int(DATA_IN_VOLUME1['size']) * + units.Gi, + True) + + def test_create_snapshot(self): + self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT + self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT) + self.DPL_MOCK\ + .create_vdev_snapshot\ + .assert_called_once_with( + self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), + self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), + DATA_IN_SNAPSHOT['display_name'], + DATA_IN_SNAPSHOT['display_description']) + + def test_delete_snapshot(self): + self.DPL_MOCK.delete_vdev_snapshot.return_value = DATA_OUTPUT + self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT) + self.DPL_MOCK\ + .delete_vdev_snapshot\ + .assert_called_once_with( + self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), + self._conver_uuid2hex(DATA_IN_SNAPSHOT['id'])) + + def test_initialize_connection(self): + self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV + self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV + res = self.dpldriver.initialize_connection(DATA_IN_VOLUME, + DATA_IN_CONNECTOR) + self.assertEqual(res['driver_volume_type'], 'iscsi') + self.assertEqual(res['data']['target_lun'], '101') + self.assertEqual(res['data']['target_discovered'], True) + self.assertEqual(res['data']['target_portal'], '172.31.1.210:3260') + 
self.assertEqual(res['data']['target_iqn'], 'iqn.2013-09.com.' + 'prophetstor:hypervisor.' + '886423051816') + + def test_terminate_connection(self): + self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT + self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) + self.DPL_MOCK\ + .unassign_vdev\ + .assert_called_once_with( + self._conver_uuid2hex(DATA_IN_VOLUME['id']), + DATA_IN_CONNECTOR['initiator']) + + def test_get_pool_info(self): + self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO + _, res = self.dpldriver._get_pool_info(POOLUUID) + self.assertEqual(res['metadata']['available_capacity'], 4194074624) + self.assertEqual(res['metadata']['ctime'], 1390551362349) + self.assertEqual(res['metadata']['display_description'], + 'Default Pool') + self.assertEqual(res['metadata']['display_name'], + 'default_pool') + self.assertEqual(res['metadata']['event_uuid'], + '4f7c4d679a664857afa4d51f282a516a') + self.assertEqual(res['metadata']['physical_device'], { + 'cache': [], + 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], + 'log': [], + 'spare': []}) + self.assertEqual(res['metadata']['pool_uuid'], POOLUUID) + self.assertEqual(res['metadata']['properties'], { + 'raid_level': 'raid0'}) + self.assertEqual(res['metadata']['state'], 'Online') + self.assertEqual(res['metadata']['total_capacity'], 4194828288) + self.assertEqual(res['metadata']['zpool_guid'], '8173612007304181810') diff --git a/cinder/volume/drivers/prophetstor/__init__.py b/cinder/volume/drivers/prophetstor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cinder/volume/drivers/prophetstor/dpl_fc.py b/cinder/volume/drivers/prophetstor/dpl_fc.py new file mode 100644 index 000000000..a03ef126a --- /dev/null +++ b/cinder/volume/drivers/prophetstor/dpl_fc.py @@ -0,0 +1,421 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import errno +import six + +from cinder import exception +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers.prophetstor import dplcommon +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + + +class DPLFCDriver(dplcommon.DPLCOMMONDriver, + driver.FibreChannelDriver): + def __init__(self, *args, **kwargs): + super(DPLFCDriver, self).__init__(*args, **kwargs) + + def _get_fc_channel(self): + """return : + fcInfos[uuid] + fcInfo[uuid]['display_name'] + fcInfo[uuid]['display_description'] + fcInfo[uuid]['hardware_address'] + fcInfo[uuid]['type'] + fcInfo[uuid]['speed'] + fcInfo[uuid]['state'] + """ + output = None + fcInfos = {} + try: + retCode, output = self.dpl.get_server_info() + if retCode == 0 and output: + fcUuids = output.get('metadata', + {}).get('storage_adapter', {}).keys() + for fcUuid in fcUuids: + fcInfo = output.get('metadata', + {}).get('storage_adapter', + {}).get(fcUuid) + if fcInfo['type'] == 'fc': + fcInfos[fcUuid] = fcInfo + except Exception as e: + msg = _("Failed to get fiber channel info from storage due " + "to %(stat)s") % {'stat': six.string_types(e)} + LOG.error(msg) + return fcInfos + + def _get_targets(self): + """return:: + targetInfos[uuid] = targetInfo + targetInfo['targetUuid'] + targetInfo['targetName'] + targetInfo['targetAddr'] + """ + 
output = None + targetInfos = {} + try: + retCode, output = self.dpl.get_target_list('target') + if retCode == 0 and output: + for targetInfo in output.get('children', []): + targetI = {} + targetI['targetUuid'] = targetInfo[0] + targetI['targetName'] = targetInfo[1] + targetI['targetAddr'] = targetInfo[2] + targetInfos[str(targetInfo[0])] = targetI + except Exception as e: + msg = _("Failed to get fiber channel target from storage server" + " due to %(stat)s") % {'stat': six.text_type(e)} + targetInfos = {} + LOG.error(msg) + return targetInfos + + def _get_targetwpns(self, volumeid, initiatorWwpns): + lstargetWwpns = [] + try: + ret, output = self.dpl.get_vdev(volumeid) + if ret == 0 and output: + exports = output.get('exports', {}) + fc_infos = exports.get('Network/FC', {}) + for fc_info in fc_infos: + for p in fc_info.get('permissions', []): + if p.get(initiatorWwpns, None): + targetWwpns = fc_info.get('target_identifier', '') + lstargetWwpns.append(targetWwpns) + except Exception as e: + msg = _("Failed to get target wwpns from storage due " + "to %(stat)s") % {'stat': six.text_type(e)} + LOG.error(msg) + lstargetWwpns = [] + return lstargetWwpns + + def _is_initiator_wwpn_active(self, targetWwpn, initiatorWwpn): + fActive = False + output = None + try: + retCode, output = self.dpl.get_sns_table(targetWwpn) + if retCode == 0 and output: + for fdwwpn, fcport in output.get('metadata', + {}).get('sns_table', + []): + if fdwwpn == initiatorWwpn: + fActive = True + break + except Exception: + LOG.error(_('Failed to get sns table')) + return fActive + + def _convertHex2String(self, wwpns): + szwwpns = '' + if len(str(wwpns)) == 16: + szwwpns = '%2s:%2s:%2s:%2s:%2s:%2s:%2s:%2s' % ( + str(wwpns)[0:2], + str(wwpns)[2:4], + str(wwpns)[4:6], + str(wwpns)[6:8], + str(wwpns)[8:10], + str(wwpns)[10:12], + str(wwpns)[12:14], + str(wwpns)[14:16]) + return szwwpns + + def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename): + ret = 0 + output = '' + msg = 
_('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s') \ + % {'volume': volumeid, 'wwpns': targetwwpns, + 'iqn': initiatorwwpns, 'volumename': volumename} + LOG.debug(msg) + try: + ret, output = self.dpl.assign_vdev_fc( + self._conver_uuid2hex(volumeid), targetwwpns, + initiatorwwpns, volumename) + except Exception: + msg = _('Volume %(volumeid) failed to send assign command, ' + 'ret: %(status)s output: %(output)s') % \ + {'volumeid': volumeid, 'status': ret, 'output': output} + LOG.error(msg) + ret = errno.EFAULT + + if ret == 0: + ret, event_uuid = self._get_event_uuid(output) + + if ret == errno.EAGAIN: + status = self._wait_event( + self.dpl.get_vdev_status, + self._conver_uuid2hex(volumeid), event_uuid) + if status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to assign volume %(id)s: ' + '%(status)s.') % {'id': volumeid, + 'status': status} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + ret = 0 + elif ret != 0: + msg = _('Flexvisor assign volume failed:%(id)s:' + '%(status)s.') % {'id': volumeid, 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return ret + + def _delete_export_fc(self, volumeid, targetwwpns, initiatorwwpns): + ret = 0 + output = '' + ret, output = self.dpl.unassign_vdev_fc( + self._conver_uuid2hex(volumeid), + targetwwpns, initiatorwwpns) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event( + self.dpl.get_vdev_status, volumeid, event_uuid) + if status['state'] == 'error': + msg = _('Flexvisor failed to unassign volume %(id)s:' + ' %(status)s.') % {'id': volumeid, + 'status': status} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + ret = 0 + else: + msg = _('Flexvisor failed to unassign volume (get event) ' + '%(id)s.') % {'id': volumeid} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = 
_('Flexvisor unassign volume failed:%(id)s:' + '%(status)s.') % {'id': volumeid, 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor succeed to unassign volume ' + '%(id)s.') % {'id': volumeid} + LOG.info(msg) + + return ret + + def _build_initiator_target_map(self, connector, tgtwwns): + """Build the target_wwns and the initiator target map.""" + init_targ_map = {} + initiator_wwns = connector['wwpns'] + for initiator in initiator_wwns: + init_targ_map[initiator] = tgtwwns + + return init_targ_map + + @fczm_utils.AddFCZone + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + """ + connector = {'ip': CONF.my_ip, + 'host': CONF.host, + 'initiator': self._initiator, + 'wwnns': self._fc_wwnns, + 'wwpns': self._fc_wwpns} + + """ + dc_fc = {} + dc_target = {} + lsTargetWwpn = [] + output = None + properties = {} + preferTargets = {} + ret = 0 + targetIdentifier = [] + szwwpns = [] + LOG.info('initialize_connection volume: %s,' + ' connector: %s' % (volume, connector)) + # Get Storage Fiber channel controller + dc_fc = self._get_fc_channel() + + # Get existed FC target list to decide target wwpn + dc_target = self._get_targets() + if len(dc_target) == 0: + msg = _('Backend storage did not configure fiber channel target.') + raise exception.VolumeBackendAPIException(data=msg) + + for keyFc in dc_fc.keys(): + for targetuuid in dc_target.keys(): + if dc_fc[keyFc]['hardware_address'] == \ + dc_target[targetuuid]['targetAddr']: + preferTargets[targetuuid] = dc_target[targetuuid] + break + # Confirm client wwpn is existed in sns table + # Covert wwwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format + for dwwpn in connector['wwpns']: + szwwpn = self._convertHex2String(dwwpn) + if len(szwwpn) == 0: + msg = _('Invalid wwpns format %(wwpns)s') % \ + {'wwpns': connector['wwpns']} + raise exception.VolumeBackendAPIException(data=msg) + LOG.error(msg) + 
szwwpns.append(szwwpn) + + if len(szwwpns): + for targetUuid in preferTargets.keys(): + targetWwpn = '' + targetWwpn = preferTargets.get(targetUuid, + {}).get('targetAddr', '') + lsTargetWwpn.append(targetWwpn) + # Use wwpns to assign volume. + msg = _('Prefer use target wwpn %(wwpn)s') % {'wwpn': lsTargetWwpn} + LOG.info(msg) + # Start to create export in all FC target node. + assignedTarget = [] + for pTarget in lsTargetWwpn: + try: + ret = self._export_fc(volume['id'], str(pTarget), szwwpns, + volume['name']) + if ret: + break + else: + assignedTarget.append(pTarget) + except Exception as e: + msg = _('Failed to export fiber channel target ' + 'due to %s') % (six.text_type(e)) + LOG.error(msg) + ret = errno.EFAULT + break + if ret == 0: + ret, output = self.dpl.get_vdev(self._conver_uuid2hex( + volume['id'])) + nLun = -1 + if ret == 0: + try: + for p in output['exports']['Network/FC']: + # check initiator wwpn existed in target initiator list + for initI in p.get('permissions', []): + for szwpn in szwwpns: + if initI.get(szwpn, None): + nLun = initI[szwpn] + break + if nLun != -1: + break + + if nLun != -1: + targetIdentifier.append( + str(p['target_identifier']).replace(':', '')) + + except Exception: + msg = _('Invalid connection initialization response of ' + 'volume %(name)s: ' + '%(output)s') % {'name': volume['name'], + 'output': output} + raise exception.VolumeBackendAPIException(data=msg) + + if nLun != -1: + init_targ_map = self._build_initiator_target_map(connector, + targetIdentifier) + properties['target_discovered'] = True + properties['target_wwn'] = targetIdentifier + properties['target_lun'] = int(nLun) + properties['volume_id'] = volume['id'] + properties['initiator_target_map'] = init_targ_map + msg = _('%(volume)s assign type fibre_channel, properties ' + '%(properties)s') % {'volume': volume['id'], + 'properties': properties} + LOG.info(msg) + else: + msg = _('Invalid connection initialization response of ' + 'volume %(name)s') % {'name': 
volume['name']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + msg = _('Connect initialization info: ' + '{driver_volume_type: fibre_channel, ' + 'data: %(properties)s') % {'properties': properties} + LOG.info(msg) + return {'driver_volume_type': 'fibre_channel', + 'data': properties} + + @fczm_utils.RemoveFCZone + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + """ + connector = {'ip': CONF.my_ip, + 'host': CONF.host, + 'initiator': self._initiator, + 'wwnns': self._fc_wwnns, + 'wwpns': self._fc_wwpns} + """ + lstargetWwpns = [] + lsTargets = [] + szwwpns = [] + ret = 0 + info = {'driver_volume_type': 'fibre_channel', 'data': {}} + msg = _('terminate_connection volume: %(volume)s, ' + 'connector: %(con)s') % {'volume': volume, 'con': connector} + LOG.info(msg) + # Query targetwwpns. + # Get all target list of volume. + for dwwpn in connector['wwpns']: + szwwpn = self._convertHex2String(dwwpn) + if len(szwwpn) == 0: + msg = _('Invalid wwpns format %(wwpns)s') % \ + {'wwpns': connector['wwpns']} + raise exception.VolumeBackendAPIException(data=msg) + LOG.error(msg) + szwwpns.append(szwwpn) + + if len(szwwpns) == 0: + ret = errno.EFAULT + msg = _('Invalid wwpns format %(wwpns)s') % \ + {'wwpns': connector['wwpns']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + for szwwpn in szwwpns: + lstargetWwpns = self._get_targetwpns( + self._conver_uuid2hex(volume['id']), szwwpn) + lsTargets = list(set(lsTargets + lstargetWwpns)) + + # Remove all export target + try: + for ptarget in lsTargets: + ret = self._delete_export_fc(volume['id'], ptarget, szwwpns) + if ret: + break + except Exception: + ret = errno.EFAULT + finally: + if ret: + msg = _('Faield to unassign %(volume)s') % (volume['id']) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Failed to delete export with fibre channel + if ret: + init_targ_map = 
self._build_initiator_target_map(connector, + lsTargets) + info['data'] = {'target_wwn': lsTargets, + 'initiator_target_map': init_targ_map} + + return info + + def get_volume_stats(self, refresh=False): + if refresh: + data = super(DPLFCDriver, self).get_volume_stats(refresh) + if data: + data['storage_protocol'] = 'FC' + backend_name = \ + self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = (backend_name or 'DPLFCDriver') + self._stats = data + return self._stats diff --git a/cinder/volume/drivers/prophetstor/dpl_iscsi.py b/cinder/volume/drivers/prophetstor/dpl_iscsi.py new file mode 100644 index 000000000..0a5aefc5d --- /dev/null +++ b/cinder/volume/drivers/prophetstor/dpl_iscsi.py @@ -0,0 +1,149 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import errno + +from cinder import exception +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +import cinder.volume.driver +from cinder.volume.drivers.prophetstor import dplcommon + +LOG = logging.getLogger(__name__) + + +class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, + cinder.volume.driver.ISCSIDriver): + def __init__(self, *args, **kwargs): + super(DPLISCSIDriver, self).__init__(*args, **kwargs) + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + properties = {} + properties['target_lun'] = None + properties['target_discovered'] = True + properties['target_portal'] = '' + properties['target_iqn'] = None + properties['volume_id'] = volume['id'] + properties['access_mode'] = 'rw' + + dpl_server = self.configuration.san_ip + dpl_iscsi_port = self.configuration.iscsi_port + ret, output = self.dpl.assign_vdev(self._conver_uuid2hex( + volume['id']), connector['initiator'].lower(), volume['id'], + '%s:%d' % (dpl_server, dpl_iscsi_port), 0) + if ret == 0: + ret, event_uuid = self._get_event_uuid(output) + + if ret == errno.EAGAIN: + status = self._wait_event( + self.dpl.get_vdev_status, self._conver_uuid2hex( + volume['id']), event_uuid) + if status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to assign volume %(id)s: ' + '%(status)s.') % {'id': volume['id'], + 'status': status} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor assign volume failed.:%(id)s:' + '%(status)s.') % {'id': volume['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if ret == 0: + ret, output = self.dpl.get_vdev( + self._conver_uuid2hex(volume['id'])) + if ret == 0: + for tgInfo in output['exports']['Network/iSCSI']: + if tgInfo['permissions'] and \ + isinstance(tgInfo['permissions'][0], dict): + for assign in tgInfo['permissions']: + if 
connector['initiator'].lower() in assign.keys(): + for tgportal in tgInfo.get('portals', {}): + properties['target_portal'] = tgportal + break + properties['target_lun'] = \ + assign[connector['initiator'].lower()] + break + + if properties['target_portal'] != '': + properties['target_iqn'] = tgInfo['target_identifier'] + break + else: + if connector['initiator'].lower() in tgInfo['permissions']: + for tgportal in tgInfo.get('portals', {}): + properties['target_portal'] = tgportal + break + + if properties['target_portal'] != '': + properties['target_lun'] = \ + tgInfo['logical_unit_number'] + properties['target_iqn'] = \ + tgInfo['target_identifier'] + break + + if not (ret == 0 or properties['target_portal']): + raise exception.VolumeBackendAPIException( + data='Flexvisor failed to assign volume %s iqn %s.' + % (volume['id'], connector['initiator'])) + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + ret, output = self.dpl.unassign_vdev( + self._conver_uuid2hex(volume['id']), + connector['initiator']) + + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event( + self.dpl.get_vdev_status, volume['id'], event_uuid) + if status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to unassign volume %(id)s:' + ' %(status)s.') % {'id': volume['id'], + 'status': status} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to unassign volume (get event) ' + '%(id)s.') % {'id': volume['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor unassign volume failed:%(id)s:' + '%(status)s.') % {'id': volume['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def get_volume_stats(self, refresh=False): + if refresh: + try: 
+                data = super(DPLISCSIDriver, self).get_volume_stats(refresh)
+                if data:
+                    data['storage_protocol'] = 'iSCSI'
+                    backend_name = \
+                        self.configuration.safe_get('volume_backend_name')
+                    data['volume_backend_name'] = \
+                        (backend_name or 'DPLISCSIDriver')
+                    self._stats = data
+            except Exception as exc:
+                # NOTE(review): the conversion was written '%(exc)%s', which
+                # renders a literal '%s' and silently drops the exception
+                # text; the correct named conversion is '%(exc)s'.
+                LOG.warning(_('Cannot get volume status '
+                              '%(exc)s.') % {'exc': exc})
+        return self._stats
diff --git a/cinder/volume/drivers/prophetstor/dplcommon.py b/cinder/volume/drivers/prophetstor/dplcommon.py
new file mode 100644
index 000000000..7a738c41b
--- /dev/null
+++ b/cinder/volume/drivers/prophetstor/dplcommon.py
@@ -0,0 +1,1061 @@
+# Copyright (c) 2014 ProphetStor, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Implementation of the class of ProphetStor DPL storage adapter of Federator.
+""" + +import base64 +import errno +import httplib +import json +import random +import time + +import six + +from cinder import exception +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall +from cinder.openstack.common import units +from cinder.volume import driver +from cinder.volume.drivers.prophetstor import options +from cinder.volume.drivers.san import san + +LOG = logging.getLogger(__name__) + +CONNECTION_RETRY = 10 +DISCOVER_SERVER_TYPE = 'dpl' +DPL_BLOCKSTOR = '/dpl_blockstor' +DPL_SYSTEM = '/dpl_system' + +DPL_VER_V1 = 'v1' +DPL_OBJ_POOL = 'dpl_pool' +DPL_OBJ_DISK = 'dpl_disk' +DPL_OBJ_VOLUME = 'dpl_volume' +DPL_OBJ_VOLUMEGROUP = 'dpl_volgroup' +DPL_OBJ_SNAPSHOT = 'cdmi_snapshots' +DPL_OBJ_EXPORT = 'dpl_export' + +DPL_OBJ_REPLICATION = 'cdmi_replication' +DPL_OBJ_TARGET = 'dpl_target' +DPL_OBJ_SYSTEM = 'dpl_system' +DPL_OBJ_SNS = 'sns_table' + + +class DPLCommand(object): + """DPL command interface.""" + + def __init__(self, ip, port, username, password): + self.ip = ip + self.port = port + self.username = username + self.password = password + + def send_cmd(self, method, url, params, expected_status): + """Send command to DPL.""" + connection = None + retcode = 0 + response = {} + data = {} + header = {'Content-Type': 'application/cdmi-container', + 'Accept': 'application/cdmi-container', + 'x-cdmi-specification-version': '1.0.2'} + # base64 encode the username and password + auth = base64.encodestring('%s:%s' + % (self.username, + self.password)).replace('\n', '') + header['Authorization'] = 'Basic %s' % auth + + if not params: + payload = None + else: + try: + payload = json.dumps(params, ensure_ascii=False) + payload.encode('utf-8') + except Exception: + LOG.error(_('JSON encode params error: %s.'), + six.text_type(params)) + retcode = errno.EINVAL + for i in range(CONNECTION_RETRY): + try: + connection = httplib.HTTPSConnection(self.ip, + self.port, 
+ timeout=60) + if connection: + retcode = 0 + break + except IOError as ioerr: + LOG.error(_('Connect to Flexvisor error: %s.'), + six.text_type(ioerr)) + retcode = errno.ENOTCONN + except Exception as e: + LOG.error(_('Connect to Flexvisor failed: %s.'), + six.text_type(e)) + retcode = errno.EFAULT + + retry = CONNECTION_RETRY + while (connection and retry): + try: + connection.request(method, url, payload, header) + except httplib.CannotSendRequest as e: + connection.close() + time.sleep(1) + connection = httplib.HTTPSConnection(self.ip, + self.port, + timeout=60) + retry -= 1 + if connection: + if retry == 0: + retcode = errno.ENOTCONN + else: + retcode = 0 + else: + retcode = errno.ENOTCONN + continue + except Exception as e: + LOG.error(_('Failed to send request: %s.'), + six.text_type(e)) + retcode = errno.EFAULT + break + + if retcode == 0: + try: + response = connection.getresponse() + if response.status == httplib.SERVICE_UNAVAILABLE: + LOG.error(_('The Flexvisor service is unavailable.')) + time.sleep(1) + retry -= 1 + retcode = errno.ENAVAIL + continue + else: + retcode = 0 + break + except httplib.ResponseNotReady as e: + time.sleep(1) + retry -= 1 + retcode = errno.EFAULT + continue + except Exception as e: + LOG.error(_('Failed to get response: %s.'), + six.text_type(e.message)) + retcode = errno.EFAULT + break + + if retcode == 0 and response.status in expected_status and\ + response.status == httplib.NOT_FOUND: + retcode = errno.ENAVAIL + elif retcode == 0 and response.status not in expected_status: + LOG.error(_('%(method)s %(url)s unexpected response status: ' + '%(response)s (expects: %(expects)s).') + % {'method': method, + 'url': url, + 'response': httplib.responses[response.status], + 'expects': expected_status}) + if response.status == httplib.UNAUTHORIZED: + raise exception.NotAuthorized + retcode = errno.EACCES + else: + retcode = errno.EIO + elif retcode == 0 and response.status is httplib.NOT_FOUND: + retcode = errno.ENAVAIL + elif 
retcode == 0 and response.status == httplib.ACCEPTED:
+                # NOTE(review): compare HTTP status codes with '==', not
+                # 'is' — identity comparison of ints only works by accident
+                # of CPython's small-int cache.
+                retcode = errno.EAGAIN
+                try:
+                    data = response.read()
+                    data = json.loads(data)
+                except (TypeError, ValueError) as e:
+                    LOG.error(_('Call to json.loads() raised an exception: %s.'),
+                              six.text_type(e))
+                    retcode = errno.ENOEXEC
+                except Exception as e:
+                    LOG.error(_('Read response raised an exception: %s.'),
+                              six.text_type(e))
+                    retcode = errno.ENOEXEC
+            elif retcode == 0 and \
+                    response.status in [httplib.OK, httplib.CREATED] and \
+                    httplib.NO_CONTENT not in expected_status:
+                try:
+                    data = response.read()
+                    data = json.loads(data)
+                except (TypeError, ValueError) as e:
+                    LOG.error(_('Call to json.loads() raised an exception: %s.'),
+                              six.text_type(e))
+                    retcode = errno.ENOEXEC
+                except Exception as e:
+                    LOG.error(_('Read response raised an exception: %s.'),
+                              six.text_type(e))
+                    retcode = errno.ENOEXEC
+
+        if connection:
+            connection.close()
+        return retcode, data
+
+
+class DPLVolume(object):
+
+    def __init__(self, dplServer, dplPort, dplUser, dplPassword):
+        self.objCmd = DPLCommand(dplServer, dplPort, dplUser, dplPassword)
+
+    def _execute(self, method, url, params, expected_status):
+        if self.objCmd:
+            return self.objCmd.send_cmd(method, url, params, expected_status)
+        else:
+            return -1, None
+
+    def _gen_snapshot_url(self, vdevid, snapshotid):
+        snapshot_url = '/%s/%s/%s' % (vdevid, DPL_OBJ_SNAPSHOT, snapshotid)
+        return snapshot_url
+
+    def get_server_info(self):
+        method = 'GET'
+        url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_SYSTEM)
+        return self._execute(method, url, None, [httplib.OK, httplib.ACCEPTED])
+
+    def create_vdev(self, volumeID, volumeName, volumeDesc, poolID, volumeSize,
+                    fthinprovision=True, maximum_snapshot=1024,
+                    snapshot_quota=None):
+        method = 'PUT'
+        metadata = {}
+        params = {}
+        url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
+
+        if volumeName is None or volumeName == '':
+            metadata['display_name'] = volumeID
+        else:
+            metadata['display_name'] = volumeName
+
metadata['display_description'] = volumeDesc
+        metadata['pool_uuid'] = poolID
+        metadata['total_capacity'] = volumeSize
+        # NOTE(review): honor the maximum_snapshot parameter; the original
+        # hard-coded 1024 here, silently ignoring the caller's value
+        # (extend_vdev below already uses the parameter correctly).
+        metadata['maximum_snapshot'] = maximum_snapshot
+        if snapshot_quota is not None:
+            metadata['snapshot_quota'] = int(snapshot_quota)
+        metadata['properties'] = dict(thin_provision=fthinprovision)
+        params['metadata'] = metadata
+        return self._execute(method,
+                             url, params,
+                             [httplib.OK, httplib.ACCEPTED, httplib.CREATED])
+
+    def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize,
+                    maximum_snapshot=1024, snapshot_quota=None):
+        method = 'PUT'
+        metadata = {}
+        params = {}
+        url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
+
+        if volumeName is None or volumeName == '':
+            metadata['display_name'] = volumeID
+        else:
+            metadata['display_name'] = volumeName
+        metadata['display_description'] = volumeDesc
+        metadata['total_capacity'] = int(volumeSize)
+        metadata['maximum_snapshot'] = maximum_snapshot
+        if snapshot_quota is not None:
+            metadata['snapshot_quota'] = snapshot_quota
+        params['metadata'] = metadata
+        return self._execute(method,
+                             url, params,
+                             [httplib.OK, httplib.ACCEPTED, httplib.CREATED])
+
+    def delete_vdev(self, volumeID, force=False):
+        method = 'DELETE'
+        metadata = {}
+        params = {}
+        url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
+
+        metadata['force'] = force
+        params['metadata'] = metadata
+        return self._execute(method,
+                             url, params,
+                             [httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND,
+                              httplib.NO_CONTENT])
+
+    def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc,
+                                  snapshotID, poolID, fthinprovision=True,
+                                  maximum_snapshot=1024, snapshot_quota=None):
+        method = 'PUT'
+        metadata = {}
+        params = {}
+        url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevID)
+        metadata['snapshot_operation'] = 'copy'
+        if vdevDisplayName is None or vdevDisplayName == "":
+            metadata['display_name'] = vdevID
+        else:
+            metadata['display_name'] = vdevDisplayName
+        metadata['display_description'] = vdevDesc
+
metadata['pool_uuid'] = poolID + metadata['properties'] = {} + metadata['maximum_snapshot'] = maximum_snapshot + if snapshot_quota: + metadata['snapshot_quota'] = snapshot_quota + metadata['properties'] = dict(thin_provision=fthinprovision) + + params['metadata'] = metadata + params['copy'] = self._gen_snapshot_url(vdevID, snapshotID) + return self._execute(method, + url, params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id, + vol_display_name, description, snap_id): + method = 'PUT' + params = {} + metadata = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, new_vol_id) + + metadata['snapshot_operation'] = 'spawn' + if vol_display_name is None or vol_display_name == '': + metadata['display_name'] = new_vol_id + else: + metadata['display_name'] = vol_display_name + metadata['display_description'] = description + params['metadata'] = metadata + params['copy'] = self._gen_snapshot_url(src_vol_id, snap_id) + + return self._execute(method, url, params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def get_pool(self, poolid): + method = 'GET' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid) + + return self._execute(method, url, None, [httplib.OK, httplib.ACCEPTED]) + + def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName, + volumeDesc, volumeSize, fthinprovision=True, + maximum_snapshot=1024, snapshot_quota=None): + method = 'PUT' + params = {} + metadata = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, NewVolumeID) + metadata["snapshot_operation"] = "clone" + if volumeName is None or volumeName == '': + metadata["display_name"] = NewVolumeID + else: + metadata["display_name"] = volumeName + metadata["display_description"] = volumeDesc + metadata["pool_uuid"] = poolID + metadata["total_capacity"] = volumeSize + metadata["maximum_snapshot"] = maximum_snapshot + if snapshot_quota: + metadata["snapshot_quota"] = snapshot_quota + metadata["properties"] = 
dict(thin_provision=fthinprovision) + params["metadata"] = metadata + params["copy"] = SourceVolumeID + + return self._execute(method, + url, params, + [httplib.OK, httplib.CREATED, httplib.ACCEPTED]) + + def create_vdev_snapshot(self, volumeID, snapshotID, snapshotName='', + snapshotDes=''): + method = 'PUT' + metadata = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) + + if snapshotName is None or snapshotName == '': + metadata['display_name'] = snapshotID + else: + metadata['display_name'] = snapshotName + metadata['display_description'] = snapshotDes + + params['metadata'] = metadata + params['snapshot'] = snapshotID + + return self._execute(method, + url, params, + [httplib.OK, httplib.CREATED, httplib.ACCEPTED]) + + def get_vdev(self, vdevid): + method = 'GET' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + return self._execute(method, + url, None, + [httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND]) + + def get_vdev_status(self, vdevid, eventid): + method = 'GET' + url = '/%s/%s/%s/?event_uuid=%s' \ + % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, eventid) + + return self._execute(method, + url, None, + [httplib.OK, httplib.NOT_FOUND]) + + def get_pool_status(self, poolid, eventid): + method = 'GET' + url = '/%s/%s/%s/?event_uuid=%s' \ + % (DPL_VER_V1, DPL_OBJ_POOL, poolid, eventid) + + return self._execute(method, + url, None, + [httplib.OK, httplib.NOT_FOUND]) + + def assign_vdev(self, vdevid, iqn, lunname, portal, lunid=0): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + metadata['export_operation'] = 'assign' + exports['Network/iSCSI'] = {} + target_info = {} + target_info['logical_unit_number'] = 0 + target_info['logical_unit_name'] = lunname + permissions = [] + portals = [] + portals.append(portal) + permissions.append(iqn) + target_info['permissions'] = permissions + target_info['portals'] = portals + exports['Network/iSCSI'] = 
target_info + + params['metadata'] = metadata + params['exports'] = exports + + return self._execute(method, + url, params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname, + lunid=-1): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + metadata['export_operation'] = 'assign' + exports['Network/FC'] = {} + target_info = {} + target_info['target_identifier'] = targetwwpn + target_info['logical_unit_number'] = lunid + target_info['logical_unit_name'] = lunname + target_info['permissions'] = initiatorwwpn + exports['Network/FC'] = target_info + + params['metadata'] = metadata + params['exports'] = exports + + return self._execute(method, + url, params, + [httplib.OK, httplib.ACCEPTED, httplib.CREATED]) + + def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + metadata['export_operation'] = 'unassign' + params['metadata'] = metadata + + exports['Network/iSCSI'] = {} + exports['Network/iSCSI']['target_identifier'] = targetIqn + permissions = [] + permissions.append(initiatorIqn) + exports['Network/iSCSI']['permissions'] = permissions + + params['exports'] = exports + + return self._execute(method, + url, params, + [httplib.OK, httplib.ACCEPTED, + httplib.NO_CONTENT, httplib.NOT_FOUND]) + + def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + metadata['export_operation'] = 'unassign' + params['metadata'] = metadata + + exports['Network/FC'] = {} + exports['Network/FC']['target_identifier'] = targetwwpn + permissions = initiatorwwpns + exports['Network/FC']['permissions'] = permissions + + params['exports'] = exports + + return self._execute(method, + url, 
params, + [httplib.OK, httplib.ACCEPTED, + httplib.NO_CONTENT, httplib.NOT_FOUND]) + + def delete_vdev_snapshot(self, volumeID, snapshotID): + method = 'DELETE' + url = '/%s/%s/%s/%s/%s/' \ + % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID, + DPL_OBJ_SNAPSHOT, snapshotID) + + return self._execute(method, + url, None, + [httplib.OK, httplib.ACCEPTED, httplib.NO_CONTENT, + httplib.NOT_FOUND]) + + def rollback_vdev(self, vdevid, snapshotid): + method = 'PUT' + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + params['copy'] = self._gen_snapshot_url(vdevid, snapshotid) + + return self._execute(method, + url, params, + [httplib.OK, httplib.ACCEPTED]) + + def list_vdev_snapshots(self, vdevid): + method = 'GET' + url = '/%s/%s/%s/%s/' \ + % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, DPL_OBJ_SNAPSHOT) + + return self._execute(method, + url, None, + [httplib.OK]) + + def create_target(self, targetID, protocol, displayName, targetAddress, + description=''): + method = 'PUT' + params = {} + url = '/%s/%s/%s/' \ + % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) + params['metadata'] = {} + metadata = params['metadata'] + metadata['type'] = 'target' + metadata['protocol'] = protocol + if displayName is None or displayName == '': + metadata['display_name'] = targetID + else: + metadata['display_name'] = displayName + metadata['display_description'] = description + metadata['address'] = targetAddress + return self._execute(method, url, params, [httplib.OK]) + + def get_target(self, targetID): + method = 'GET' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) + return self._execute(method, url, None, [httplib.OK]) + + def delete_target(self, targetID): + method = 'DELETE' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) + return self._execute(method, + url, None, + [httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND]) + + def get_target_list(self, type='target'): + # type = target/initiator + method = 'GET' + if type is None: + url = '/%s/%s/' % 
(DPL_VER_V1, DPL_OBJ_EXPORT) + else: + url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type) + return self._execute(method, url, None, [httplib.OK]) + + def get_sns_table(self, wwpn): + method = 'PUT' + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS) + params['metadata'] = {} + params['metadata']['protocol'] = 'fc' + params['metadata']['address'] = str(wwpn) + return self._execute(method, url, params, [httplib.OK]) + + +class DPLCOMMONDriver(driver.VolumeDriver): + """class of dpl storage adapter.""" + VERSION = '2.0' + + def __init__(self, *args, **kwargs): + super(DPLCOMMONDriver, self).__init__(*args, **kwargs) + if self.configuration: + self.configuration.append_config_values(options.DPL_OPTS) + self.configuration.append_config_values(san.san_opts) + + self.dpl = DPLVolume(self.configuration.san_ip, + self.configuration.dpl_port, + self.configuration.san_login, + self.configuration.san_password) + self._stats = {} + + def _convert_size_GB(self, size): + s = round(float(size) / units.Gi, 2) + if s > 0: + return s + else: + return 0 + + def _conver_uuid2hex(self, strID): + if strID: + return strID.replace('-', '') + else: + return None + + def _get_event_uuid(self, output): + event_uuid = "" + if type(output) is not dict: + return -1, event_uuid + + if output.get("metadata") and output["metadata"]: + if output["metadata"].get("event_uuid") and \ + output["metadata"]["event_uuid"]: + event_uuid = output["metadata"]["event_uuid"] + return 0, event_uuid + return -1, event_uuid + + def _wait_event(self, callFun, objuuid, eventid=None): + nRetry = 30 + fExit = False + status = {} + status['state'] = 'error' + status['output'] = {} + while nRetry: + try: + if eventid: + ret, output = callFun( + self._conver_uuid2hex(objuuid), + self._conver_uuid2hex(eventid)) + else: + ret, output = callFun(self._conver_uuid2hex(objuuid)) + + if ret == 0: + if output['completionStatus'] == 'Complete': + fExit = True + status['state'] = 'available' + 
status['output'] = output + elif output['completionStatus'] == 'Error': + fExit = True + status['state'] = 'error' + raise loopingcall.LoopingCallDone(retvalue=False) + else: + nsleep = random.randint(0, 10) + value = round(float(nsleep) / 10, 2) + time.sleep(value) + elif ret == errno.ENAVAIL: + status['state'] = 'deleted' + fExit = True + else: + nRetry -= 1 + time.sleep(3) + continue + + except Exception as e: + msg = _('Flexvisor failed to get event %(volume)s' + '(%(status)s).') % {'volume': eventid, + 'status': six.text_type(e)} + LOG.error(msg) + raise loopingcall.LoopingCallDone(retvalue=False) + status['state'] = 'error' + fExit = True + + if fExit is True: + break + + return status + + def create_export(self, context, volume): + pass + + def ensure_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def create_volume(self, volume): + """Create a volume.""" + pool = self.configuration.dpl_pool + ret, output = self.dpl.create_vdev( + self._conver_uuid2hex(volume['id']), + volume.get('display_name', ''), + volume.get('display_description', ''), + pool, + int(volume['size']) * units.Gi, + self.configuration.san_thin_provision) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to create volume %(volume)s: ' + '%(status)s.') % {'volume': volume['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to create volume (get event) ' + '%s.') % (volume['id']) + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + elif ret != 0: + msg = _('Flexvisor create volume failed.:%(volumeid)s:' + '%(status)s.') % {'volumeid': volume['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = 
_('Flexvisor succeed to create volume ' + '%(id)s.') % {'id': volume['id']} + LOG.info(msg) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + pool = self.configuration.dpl_pool + ret, output = self.dpl.create_vdev_from_snapshot( + self._conver_uuid2hex(volume['id']), + volume.get('display_name', ''), + volume.get('display_description', ''), + self._conver_uuid2hex(snapshot['id']), + pool, + self.configuration.san_thin_provision) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to create volume from snapshot ' + '%(id)s:%(status)s.') % {'id': snapshot['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor failed to create volume from snapshot ' + '(failed to get event) ' + '%(id)s.') % {'id': snapshot['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to create volume from snapshot ' + '%(id)s: %(status)s.') % {'id': snapshot['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor succeed to create volume %(id)s ' + 'from snapshot.') % {'id': volume['id']} + LOG.info(msg) + + def spawn_volume_from_snapshot(self, volume, snapshot): + """Spawn a REFERENCED volume from a snapshot.""" + ret, output = self.dpl.spawn_vdev_from_snapshot( + self._conver_uuid2hex(volume['id']), + self._conver_uuid2hex(snapshot['volume_id']), + volume.get('display_name', ''), + volume.get('display_description', ''), + self._conver_uuid2hex(snapshot['id'])) + + if ret == errno.EAGAIN: + # its an async process + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], 
event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to spawn volume from snapshot ' + '%(id)s:%(status)s.') % {'id': snapshot['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to spawn volume from snapshot ' + '(failed to get event) ' + '%(id)s.') % {'id': snapshot['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to create volume from snapshot ' + '%(id)s: %(status)s.') % {'id': snapshot['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor succeed to create volume %(id)s ' + 'from snapshot.') % {'id': volume['id']} + LOG.info(msg) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + pool = self.configuration.dpl_pool + ret, output = self.dpl.clone_vdev( + self._conver_uuid2hex(src_vref['id']), + self._conver_uuid2hex(volume['id']), + pool, + volume.get('display_name', ''), + volume.get('display_description', ''), + int(volume['size']) * units.Gi, + self.configuration.san_thin_provision) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to clone volume %(id)s: ' + '%(status)s.') % {'id': src_vref['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to clone volume (failed to get event' + ') %(id)s.') % {'id': src_vref['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + elif ret != 0: + msg = _('Flexvisor failed to clone volume %(id)s: ' + '%(status)s.') % {'id': src_vref['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: 
+ msg = _('Flexvisor succeed to clone ' + 'volume %(id)s.') % {'id': volume['id']} + LOG.info(msg) + + def delete_volume(self, volume): + """Deletes a volume.""" + ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) + if ret == errno.EAGAIN: + status = self._wait_event(self.dpl.get_vdev, volume['id']) + if status['state'] == 'error': + msg = _('Flexvisor failed deleting volume %(id)s: ' + '%(status)s.') % {'id': volume['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret == errno.ENAVAIL: + ret = 0 + msg = _('Flexvisor volume %(id)s not ' + 'existed.') % {'id': volume['id']} + LOG.info(msg) + elif ret != 0: + msg = _('Flexvisor failed to delete volume %(id)s: ' + '%(status)s.') % {'id': volume['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + + def extend_volume(self, volume, new_size): + ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']), + volume.get('display_name', ''), + volume.get('display_description', + ''), + new_size * units.Gi) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to extend volume ' + '%(id)s:%(status)s.') % {'id': volume, + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor failed to extend volume ' + '(failed to get event) ' + '%(id)s.') % {'id': volume['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to extend volume ' + '%(id)s: %(status)s.') % {'id': volume['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor succeed to extend volume' + ' %(id)s.') % {'id': volume['id']} + LOG.info(msg) + + def 
create_snapshot(self, snapshot): + """Creates a snapshot.""" + ret, output = self.dpl.create_vdev_snapshot( + self._conver_uuid2hex(snapshot['volume_id']), + self._conver_uuid2hex(snapshot['id']), + snapshot.get('display_name', ''), + snapshot.get('display_description', '')) + + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + snapshot['volume_id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to create snapshot for volume ' + '%(id)s: %(status)s.') % \ + {'id': snapshot['volume_id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to create snapshot for volume ' + '(failed to get event) %(id)s.') % \ + {'id': snapshot['volume_id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to create snapshot for volume %(id)s: ' + '%(status)s.') % {'id': snapshot['volume_id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + ret, output = self.dpl.delete_vdev_snapshot( + self._conver_uuid2hex(snapshot['volume_id']), + self._conver_uuid2hex(snapshot['id'])) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + snapshot['volume_id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to delete snapshot %(id)s: ' + '%(status)s.') % {'id': snapshot['id'], + 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to delete snapshot (failed to ' + 'get event) %(id)s.') % {'id': snapshot['id']} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + elif ret == errno.ENAVAIL: + msg = _('Flexvisor snapshot 
%(id)s not existed.') % \ + {'id': snapshot['id']} + LOG.info(msg) + elif ret != 0: + msg = _('Flexvisor failed to delete snapshot %(id)s: ' + '%(status)s.') % {'id': snapshot['id'], 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor succeed to delete ' + 'snapshot %(id)s.') % {'id': snapshot['id']} + LOG.info(msg) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first. + """ + data = {} + totalSize = 0 + availableSize = 0 + + ret, output = self._get_pool_info(self.configuration.dpl_pool) + if ret == 0: + totalSize = int(output['metadata']['total_capacity']) + availableSize = int(output['metadata']['available_capacity']) + else: + totalSize = 0 + availableSize = 0 + + data['volume_backend_name'] = \ + self.configuration.safe_get('volume_backend_name') + + location_info = '%(driver)s:%(host)s:%(volume)s' % { + 'driver': self.__class__.__name__, + 'host': self.configuration.san_ip, + 'volume': self.configuration.dpl_pool + } + + try: + ret, output = self.dpl.get_server_info() + if ret == 0: + data['vendor_name'] = output['metadata']['vendor'] + data['driver_version'] = output['metadata']['version'] + data['storage_protocol'] = 'iSCSI' + data['total_capacity_gb'] = self._convert_size_GB(totalSize) + data['free_capacity_gb'] = self._convert_size_GB(availableSize) + data['reserved_percentage'] = 0 + data['QoS_support'] = False + data['location_info'] = location_info + self._stats = data + except Exception as e: + msg = _('Failed to get server info due to ' + '%(state)s.') % {'state': six.text_type(e)} + LOG.error(msg) + return self._stats + + def do_setup(self, context): + """Any initialization the volume driver does 
while starting.""" + self.context = context + LOG.info(_('Activate Flexvisor cinder volume driver.')) + + def check_for_setup_error(self): + """Check DPL can connect properly.""" + pass + + def _get_pool_info(self, poolid): + """Query pool information.""" + ret, output = self.dpl.get_pool(poolid) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_pool_status, poolid, + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to get pool info %(id)s: ' + '%(status)s.') % {'id': poolid, 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + ret = 0 + output = status.get('output', {}) + else: + LOG.error(_('Flexvisor failed to get pool info ' + '(failed to get event)%s.') % (poolid)) + raise exception.VolumeBackendAPIException( + data="failed to get event") + elif ret != 0: + msg = _('Flexvisor failed to get pool info %(id)s: ' + '%(status)s.') % {'id': poolid, 'status': ret} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = 'Flexvisor succeed to get pool info.' + LOG.debug(msg) + return ret, output diff --git a/cinder/volume/drivers/prophetstor/options.py b/cinder/volume/drivers/prophetstor/options.py new file mode 100644 index 000000000..c9b1f28fe --- /dev/null +++ b/cinder/volume/drivers/prophetstor/options.py @@ -0,0 +1,30 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.


from oslo.config import cfg


# Configuration options for the ProphetStor DPL drivers.  Declared once
# here and registered on the global CONF so dpl_fc/dpl_iscsi share them.
_POOL_OPT = cfg.StrOpt('dpl_pool',
                       default='',
                       help='DPL pool uuid in which DPL volumes are stored.')
_PORT_OPT = cfg.IntOpt('dpl_port',
                       default=8357,
                       help='DPL port number.')

DPL_OPTS = [_POOL_OPT, _PORT_OPT]

CONF = cfg.CONF
CONF.register_opts(DPL_OPTS)