From bf257c4e673f39968a562f66567883ed18c83959 Mon Sep 17 00:00:00 2001
From: Erlon Cruz
Date: Mon, 24 Mar 2014 11:02:47 -0300
Subject: [PATCH] Implements HDS-Cinder HNAS Drivers

This change introduces HDS HNAS iSCSI and NFS Drivers.

HNAS NFS certification results:
https://gist.github.com/sombrafam/6c73cb823ce75b0538bd
HNAS iSCSI certification results:
https://gist.github.com/sombrafam/2625987243ec91d53c7a

Change-Id: I1a16f76887cf22dcf8ce0ed1dbc0b57327a87616
Implements: blueprint hds-hnas
---
 cinder/tests/test_hds_iscsi.py            | 397 ++++++++++++
 cinder/tests/test_hds_nfs.py              | 209 +++++++
 cinder/volume/drivers/hds/hnas_backend.py | 611 +++++++++++++++++++
 cinder/volume/drivers/hds/iscsi.py        | 699 ++++++++++++++++++++++
 cinder/volume/drivers/hds/nfs.py          | 494 +++++++++++++++
 etc/cinder/cinder.conf.sample             |  17 +
 etc/cinder/rootwrap.d/volume.filters      |   3 +
 7 files changed, 2430 insertions(+)
 create mode 100644 cinder/tests/test_hds_iscsi.py
 create mode 100644 cinder/tests/test_hds_nfs.py
 create mode 100644 cinder/volume/drivers/hds/hnas_backend.py
 create mode 100644 cinder/volume/drivers/hds/iscsi.py
 create mode 100644 cinder/volume/drivers/hds/nfs.py

diff --git a/cinder/tests/test_hds_iscsi.py b/cinder/tests/test_hds_iscsi.py
new file mode 100644
index 000000000..d5f572884
--- /dev/null
+++ b/cinder/tests/test_hds_iscsi.py
@@ -0,0 +1,397 @@
+# Copyright (c) 2014 Hitachi Data Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Self test for Hitachi Unified Storage (HUS-HNAS) platform.
+"""
+
+import os
+import tempfile
+
+import mock
+
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hds import iscsi
+
+from cinder.openstack.common import log as logging
+LOG = logging.getLogger(__name__)
+
+HNASCONF = """
+<config>
+  <hnas_cmd>ssc</hnas_cmd>
+  <chap_enabled>True</chap_enabled>
+  <mgmt_ip0>172.17.44.15</mgmt_ip0>
+  <username>supervisor</username>
+  <password>supervisor</password>
+  <svc_0>
+    <volume_type>default</volume_type>
+    <iscsi_ip>172.17.39.132</iscsi_ip>
+    <hdp>fs2</hdp>
+  </svc_0>
+  <svc_1>
+    <volume_type>silver</volume_type>
+    <iscsi_ip>172.17.39.133</iscsi_ip>
+    <hdp>fs2</hdp>
+  </svc_1>
+</config>
+"""
+
+# The following information is passed on to tests when creating a volume
+_VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128,
+           'volume_type': None, 'provider_location': None, 'id': 'abcdefg'}
+
+
+class SimulatedHnasBackend(object):
+    """Simulation Back end.
Talks to HNAS.""" + + # these attributes are shared across object instances + start_lun = 0 + init_index = 0 + target_index = 0 + hlun = 0 + + def __init__(self): + self.type = 'HNAS' + self.out = '' + self.volumes = [] + # iSCSI connections + self.connections = [] + + def deleteVolume(self, name): + LOG.info("delVolume: name %s" % name) + + volume = self.getVolume(name) + if volume: + LOG.info("deleteVolume: deleted name %s provider %s" + % (volume['name'], volume['provider_location'])) + self.volumes.remove(volume) + return True + else: + return False + + def deleteVolumebyProvider(self, provider): + LOG.info("delVolumeP: provider %s" % provider) + + volume = self.getVolumebyProvider(provider) + if volume: + LOG.info("deleteVolumeP: deleted name %s provider %s" + % (volume['name'], volume['provider_location'])) + self.volumes.remove(volume) + return True + else: + return False + + def getVolumes(self): + return self.volumes + + def getVolume(self, name): + LOG.info("getVolume: find by name %s" % name) + + if self.volumes: + for volume in self.volumes: + if str(volume['name']) == name: + LOG.info("getVolume: found name %s provider %s" + % (volume['name'], volume['provider_location'])) + return volume + else: + LOG.info("getVolume: no volumes") + + LOG.info("getVolume: not found") + return None + + def getVolumebyProvider(self, provider): + LOG.info("getVolumeP: find by provider %s" % provider) + + if self.volumes: + for volume in self.volumes: + if str(volume['provider_location']) == provider: + LOG.info("getVolumeP: found name %s provider %s" + % (volume['name'], volume['provider_location'])) + return volume + else: + LOG.info("getVolumeP: no volumes") + + LOG.info("getVolumeP: not found") + return None + + def createVolume(self, name, provider, sizeMiB, comment): + LOG.info("createVolume: name %s provider %s comment %s" + % (name, provider, comment)) + + new_vol = {'additionalStates': [], + 'adminSpace': {'freeMiB': 0, + 'rawReservedMiB': 384, + 'reservedMiB': 128, + 'usedMiB': 128}, + 'baseId': 115, + 'copyType': 1, + 'creationTime8601': '2012-10-22T16:37:57-07:00', + 'creationTimeSec': 1350949077, + 'failedStates': [], + 'id': 115, + 'provider_location': provider, + 'name': name, + 'comment': comment, + 'provisioningType': 1, + 'readOnly': False, + 'sizeMiB': sizeMiB, + 'state': 1, + 'userSpace': {'freeMiB': 0, + 'rawReservedMiB': 41984, + 'reservedMiB': 31488, + 'usedMiB': 31488}, + 'usrSpcAllocLimitPct': 0, + 'usrSpcAllocWarningPct': 0, + 'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243', + 'wwn': '50002AC00073383D'} + self.volumes.append(new_vol) + + def create_lu(self, cmd, ip0, user, pw, hdp, size, name): + vol_id = name + _out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" % + (self.start_lun, size)) + self.createVolume(name, vol_id, size, "create-lu") + self.start_lun += 1 + return _out + + def delete_lu(self, cmd, ip0, user, pw, hdp, lun): + _out = "" + id = "myID" + LOG.info("Delete_Lu: check lun %s id %s" % (lun, id)) + + if self.deleteVolumebyProvider(id + '.' + str(lun)): + LOG.warn("Delete_Lu: failed to delete lun %s id %s" % (lun, id)) + return _out + + def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name): + _out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" % + (self.start_lun, size)) + + id = name + LOG.info("HNAS Create_Dup: %d" % self.start_lun) + self.createVolume(name, id + '.' 
+ str(self.start_lun), size, + "create-dup") + self.start_lun += 1 + return _out + + def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp, + port, iqn, initiator): + ctl = "" + conn = (self.hlun, lun, initiator, self.init_index, iqn, + self.target_index, ctl, port) + _out = ("H-LUN: %d mapped. LUN: %s, iSCSI Initiator: %s @ index: %d, \ + and Target: %s @ index %d is successfully paired @ CTL: %s, \ + Port: %s" % conn) + self.init_index += 1 + self.target_index += 1 + self.hlun += 1 + LOG.debug("Created connection %d" % self.init_index) + self.connections.append(conn) + return _out + + def del_iscsi_conn(self, cmd, ip0, user, pw, port, iqn, initiator): + + self.connections.pop() + + _out = ("H-LUN: successfully deleted from target") + return _out + + def extend_vol(self, cmd, ip0, user, pw, hdp, lu, size, name): + _out = ("LUN: %s successfully extended to %s MB" % (lu, size)) + id = name + self.out = _out + LOG.info("extend_vol: lu: %s %d -> %s" % (lu, int(size), self.out)) + v = self.getVolumebyProvider(id + '.' + str(lu)) + if v: + v['sizeMiB'] = size + LOG.info("extend_vol: out %s %s" % (self.out, self)) + return _out + + def get_luns(self): + return len(self.alloc_lun) + + def get_conns(self): + return len(self.connections) + + def get_out(self): + return str(self.out) + + def get_version(self, cmd, ver, ip0, user, pw): + self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \ + "version: 11.2.3319.09 LU: 256" \ + " RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01" + return self.out + + def get_iscsi_info(self, cmd, ip0, user, pw): + self.out = "CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up\n" \ + "CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up" + return self.out + + def get_hdp_info(self, cmd, ip0, user, pw): + self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \ + "70 Normal fs1\n" \ + "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2" + return self.out + + def get_targetiqn(self, cmd, ip0, user, pw, id, hdp, secret): + self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget""" + return self.out + + def set_targetsecret(self, cmd, ip0, user, pw, target, hdp, secret): + self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget""" + return self.out + + def get_targetsecret(self, cmd, ip0, user, pw, target, hdp): + self.out = """wGkJhTpXaaYJ5Rv""" + return self.out + + +class HNASiSCSIDriverTest(test.TestCase): + """Test HNAS iSCSI volume driver.""" + def __init__(self, *args, **kwargs): + super(HNASiSCSIDriverTest, self).__init__(*args, **kwargs) + + @mock.patch.object(iscsi, 'factory_bend') + def setUp(self, _factory_bend): + super(HNASiSCSIDriverTest, self).setUp() + + self.backend = SimulatedHnasBackend() + _factory_bend.return_value = self.backend + + (handle, self.config_file) = tempfile.mkstemp('.xml') + os.write(handle, HNASCONF) + os.close(handle) + + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.hds_hnas_iscsi_config_file = self.config_file + self.configuration.hds_svc_iscsi_chap_enabled = True + self.driver = iscsi.HDSISCSIDriver(configuration=self.configuration) + self.driver.do_setup("") + self.addCleanup(self._clean) + + def _clean(self): + os.remove(self.config_file) + + def _create_volume(self): + loc = self.driver.create_volume(_VOLUME) + vol = _VOLUME.copy() + vol['provider_location'] = loc['provider_location'] + return vol + + def test_create_volume(self): + loc = self.driver.create_volume(_VOLUME) + self.assertNotEqual(loc, None) + self.assertNotEqual(loc['provider_location'], None) + # cleanup + 
self.backend.deleteVolumebyProvider(loc['provider_location']) + + def test_get_volume_stats(self): + stats = self.driver.get_volume_stats(True) + self.assertEqual(stats["vendor_name"], "HDS") + self.assertEqual(stats["storage_protocol"], "iSCSI") + self.assertTrue(stats["total_capacity_gb"] > 0) + + def test_delete_volume(self): + vol = self._create_volume() + self.driver.delete_volume(vol) + # should not be deletable twice + prov_loc = self.backend.getVolumebyProvider(vol['provider_location']) + self.assertTrue(prov_loc == None) + + def test_extend_volume(self): + vol = self._create_volume() + new_size = _VOLUME['size'] * 2 + self.driver.extend_volume(vol, new_size) + # cleanup + self.backend.deleteVolumebyProvider(vol['provider_location']) + + @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') + def test_create_snapshot(self, m_id_to_vol): + vol = self._create_volume() + m_id_to_vol.return_value = vol + svol = vol.copy() + svol['volume_size'] = svol['size'] + loc = self.driver.create_snapshot(svol) + self.assertNotEqual(loc, None) + svol['provider_location'] = loc['provider_location'] + # cleanup + self.backend.deleteVolumebyProvider(svol['provider_location']) + self.backend.deleteVolumebyProvider(vol['provider_location']) + + @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') + def test_create_clone(self, m_id_to_vol): + + src_vol = self._create_volume() + m_id_to_vol.return_value = src_vol + src_vol['volume_size'] = src_vol['size'] + + dst_vol = self._create_volume() + dst_vol['volume_size'] = dst_vol['size'] + + loc = self.driver.create_cloned_volume(dst_vol, src_vol) + self.assertNotEqual(loc, None) + # cleanup + self.backend.deleteVolumebyProvider(src_vol['provider_location']) + self.backend.deleteVolumebyProvider(loc['provider_location']) + + @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') + def test_delete_snapshot(self, m_id_to_vol): + svol = self._create_volume() + + lun = svol['provider_location'] + m_id_to_vol.return_value = svol + self.driver.delete_snapshot(svol) + self.assertTrue(self.backend.getVolumebyProvider(lun) == None) + + def test_create_volume_from_snapshot(self): + svol = self._create_volume() + svol['volume_size'] = svol['size'] + vol = self.driver.create_volume_from_snapshot(_VOLUME, svol) + self.assertNotEqual(vol, None) + # cleanup + self.backend.deleteVolumebyProvider(svol['provider_location']) + self.backend.deleteVolumebyProvider(vol['provider_location']) + + @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') + def test_initialize_connection(self, m_update_vol_location): + connector = {} + connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' + connector['host'] = 'dut_1.lab.hds.com' + vol = self._create_volume() + conn = self.driver.initialize_connection(vol, connector) + self.assertTrue('3260' in conn['data']['target_portal']) + # cleanup + self.backend.deleteVolumebyProvider(vol['provider_location']) + + @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') + def test_terminate_connection(self, m_update_vol_location): + connector = {} + connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' + connector['host'] = 'dut_1.lab.hds.com' + + vol = self._create_volume() + vol['provider_location'] = "portal," +\ + connector['initiator'] +\ + ",18-48-A5-A1-80-13.0,ctl,port,hlun" + + conn = self.driver.initialize_connection(vol, connector) + num_conn_before = self.backend.get_conns() + self.driver.terminate_connection(vol, conn) + num_conn_after = self.backend.get_conns() + 
        self.assertNotEqual(num_conn_before, num_conn_after)
+        # cleanup
+        self.backend.deleteVolumebyProvider(vol['provider_location'])
diff --git a/cinder/tests/test_hds_nfs.py b/cinder/tests/test_hds_nfs.py
new file mode 100644
index 000000000..c34318ec1
--- /dev/null
+++ b/cinder/tests/test_hds_nfs.py
@@ -0,0 +1,209 @@
+# Copyright (c) 2014 Hitachi Data Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import tempfile
+
+import mock
+
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hds import nfs
+
+LOG = logging.getLogger(__name__)
+
+SHARESCONF = """172.17.39.132:/cinder
+172.17.39.133:/cinder"""
+
+HNASCONF = """
+<config>
+  <hnas_cmd>ssc</hnas_cmd>
+  <mgmt_ip0>172.17.44.15</mgmt_ip0>
+  <username>supervisor</username>
+  <password>supervisor</password>
+  <svc_0>
+    <volume_type>default</volume_type>
+    <hdp>172.17.39.132:/cinder</hdp>
+  </svc_0>
+  <svc_1>
+    <volume_type>silver</volume_type>
+    <hdp>172.17.39.133:/cinder</hdp>
+  </svc_1>
+</config>
+"""
+
+# The following information is passed on to tests when creating a volume
+_SHARE = '172.17.39.132:/cinder'
+_EXPORT = '/cinder'
+_VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
+           'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', 'size': 128,
+           'volume_type': None, 'provider_location': None,
+           'id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+_SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
+               'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'size': 128,
+               'volume_type': None, 'provider_location': None,
+               'volume_size': 128,
+               'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
+               'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+
+
+class SimulatedHnasBackend(object):
+    """Simulation Back end.
Talks to HNAS.""" + + # these attributes are shared across object instances + start_lun = 0 + + def __init__(self): + self.type = 'HNAS' + self.out = '' + + def file_clone(self, cmd, ip0, user, pw, fslabel, source_path, + target_path): + _out = "" + LOG.info("Clone: %s -> %s" % (source_path, target_path)) + return _out + + def get_version(self, ver, cmd, ip0, user, pw): + self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \ + "version: 11.2.3319.09 LU: 256 " \ + "RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01" + return self.out + + def get_hdp_info(self, ip0, user, pw): + self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 " \ + "Normal fs1\n" \ + "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 " \ + "Normal fs2" + return self.out + + def get_nfs_info(self, cmd, ip0, user, pw): + self.out = "Export: /cinder Path: /volumes HDP: fs1 FSID: 1024 " \ + "EVS: 1 IPS: 172.17.39.132\n" \ + "Export: /cinder Path: /volumes HDP: fs2 FSID: 1025 " \ + "EVS: 1 IPS: 172.17.39.133" + return self.out + + +class HDSNFSDriverTest(test.TestCase): + """Test HNAS NFS volume driver.""" + + def __init__(self, *args, **kwargs): + super(HDSNFSDriverTest, self).__init__(*args, **kwargs) + + @mock.patch.object(nfs, 'factory_bend') + def setUp(self, m_factory_bend): + super(HDSNFSDriverTest, self).setUp() + + self.backend = SimulatedHnasBackend() + m_factory_bend.return_value = self.backend + + (handle, self.config_file) = tempfile.mkstemp('.xml') + os.write(handle, HNASCONF) + os.close(handle) + (handle, self.shares_file) = tempfile.mkstemp('') + os.write(handle, SHARESCONF) + os.close(handle) + + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.hds_hnas_nfs_config_file = self.config_file + self.configuration.nfs_shares_config = self.shares_file + self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt' + self.configuration.nfs_mount_options = None + + self.driver = nfs.HDSNFSDriver(configuration=self.configuration) + self.driver.do_setup("") + self.addCleanup(self._clean) + + def _clean(self): + os.remove(self.config_file) + os.remove(self.shares_file) + super(HDSNFSDriverTest, self).tearDown() + + @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') + @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') + @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path') + @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') + def test_create_snapshot(self, m_get_volume_location, m_get_export_path, + m_get_provider_location, m_id_to_vol): + vol = _VOLUME.copy() + svol = _SNAPVOLUME.copy() + + m_id_to_vol(vol['volume_id']).return_value = vol + m_id_to_vol(svol['id']).return_value = svol + + m_get_provider_location(vol['volume_id']).return_value = _SHARE + m_get_provider_location(svol['id']).return_value = _SHARE + + m_get_volume_location(svol['volume_id']).return_value = _SHARE + m_get_export_path(svol['volume_id']).return_value = _EXPORT + + loc = self.driver.create_snapshot(svol) + self.assertNotEqual(loc, None) + + @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') + @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') + @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') + def test_create_cloned_volume(self, m_get_volume_location, + m_get_provider_location, m_id_to_vol): + vol = _VOLUME.copy() + svol = _SNAPVOLUME.copy() + + m_id_to_vol(vol['id']).return_value = vol + m_id_to_vol(svol['id']).return_value = svol + + m_get_provider_location(vol['id']).return_value = _SHARE + m_get_provider_location(svol['id']).return_value = _SHARE + + 
m_get_volume_location(svol['id']).return_value = _SHARE + + loc = self.driver.create_cloned_volume(vol, svol) + self.assertNotEqual(loc, None) + + @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') + @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') + @mock.patch.object(nfs.HDSNFSDriver, '_volume_not_present') + def test_delete_snapshot(self, m_volume_not_present, + m_get_provider_location, m_id_to_vol): + svol = _SNAPVOLUME.copy() + + m_id_to_vol(svol['volume_id']).return_value = svol + m_get_provider_location(svol['volume_id']).return_value = _SHARE + + m_volume_not_present.return_value = True + + self.driver.delete_snapshot(svol) + self.assertEqual(svol['provider_location'], None) + + @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') + @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') + @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path') + @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') + def test_create_volume_from_snapshot(self, m_get_volume_location, + m_get_export_path, + m_get_provider_location, m_id_to_vol): + vol = _VOLUME.copy() + svol = _SNAPVOLUME.copy() + + m_id_to_vol(svol['volume_id']).return_value = vol + m_id_to_vol(svol['id']).return_value = svol + + m_get_provider_location(svol['id']).return_value = _SHARE + m_get_export_path(svol['volume_id']).return_value = _EXPORT + m_get_volume_location(svol['volume_id']).return_value = _SHARE + + loc = self.driver.create_volume_from_snapshot(_VOLUME, svol) + self.assertNotEqual(loc, None) diff --git a/cinder/volume/drivers/hds/hnas_backend.py b/cinder/volume/drivers/hds/hnas_backend.py new file mode 100644 index 000000000..c28c3f650 --- /dev/null +++ b/cinder/volume/drivers/hds/hnas_backend.py @@ -0,0 +1,611 @@ +# Copyright (c) 2014 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +Hitachi Unified Storage (HUS-HNAS) platform. Backend operations. +""" + +from cinder.openstack.common import log as logging +from cinder import units +from cinder import utils +import re + +LOG = logging.getLogger("cinder.volume.driver") + + +class HnasBackend(): + """Back end. 
Talks to HUS-HNAS.""" + def get_version(self, cmd, ver, ip0, user, pw): + """Gets version information from the storage unit + + :param ver: string driver version + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :returns: formated string with version information + """ + out, err = utils.execute(cmd, + "-version", + check_exit_code=True) + util = out.split()[1] + out, err = utils.execute(cmd, + '-u', user, '-p', pw, ip0, + "cluster-getmac", + check_exit_code=True) + hardware = out.split()[2] + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, 'ver', + check_exit_code=True) + lines = out.split('\n') + + model = "" + for line in lines: + if 'Model:' in line: + model = line.split()[1] + if 'Software:' in line: + ver = line.split()[1] + + out = "Array_ID: %s (%s) version: %s LU: 256 RG: 0 RG_LU: 0 \ + Utility_version: %s" % (hardware, model, ver, util) + + LOG.debug('get_version: ' + out + ' -- ' + err) + return out + + def get_iscsi_info(self, cmd, ip0, user, pw): + """Gets IP addresses for EVSs, use EVSID as controller. + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :returns: formated string with iSCSI information + """ + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, ip0, + 'evsipaddr', '-l', + check_exit_code=True) + lines = out.split('\n') + + newout = "" + for line in lines: + if 'evs' in line and 'admin' not in line: + inf = line.split() + (evsnum, evsname, ip) = (inf[1], inf[2], inf[3]) + newout += "CTL: %s Port: 0 IP: %s Port: 3260 Link: Up\n" \ + % (evsnum, ip) + + LOG.debug('get_iscsi_info: ' + out + ' -- ' + err) + return newout + + def get_hdp_info(self, cmd, ip0, user, pw): + """Gets the list of filesystems and fsids. 
+ + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :returns: formated string with filesystems and fsids + """ + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, 'df', '-a', + check_exit_code=True) + lines = out.split('\n') + + newout = "" + for line in lines: + if 'Not mounted' in line: + continue + if 'GB' in line or 'TB' in line: + inf = line.split() + (fsid, fslabel, evsnum, capacity, used, perstr) = \ + (inf[0], inf[1], inf[2], inf[3], inf[5], inf[7]) + (availunit, usedunit) = (inf[4], inf[6]) + if usedunit == 'GB': + usedmultiplier = units.KiB + else: + usedmultiplier = units.MiB + if availunit == 'GB': + availmultiplier = units.KiB + else: + availmultiplier = units.MiB + m = re.match("\((\d+)\%\)", perstr) + if m: + percent = m.group(1) + else: + percent = 0 + newout += "HDP: %s %d MB %d MB %d %% LUs: 256 Normal %s\n" \ + % (fsid, int(float(capacity) * availmultiplier), + int(float(used) * usedmultiplier), + int(percent), fslabel) + + LOG.debug('get_hdp_info: ' + newout + ' -- ' + err) + return newout + + def _get_evs(self, cmd, ip0, user, pw, fsid): + """Gets the EVSID for the named filesystem.""" + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, ip0, + "evsfs", "list", + check_exit_code=True) + LOG.debug('get_evs: out ' + out) + + lines = out.split('\n') + for line in lines: + inf = line.split() + if fsid in line and (fsid == inf[0] or fsid == inf[1]): + return inf[3] + + LOG.warn('get_evs: ' + out + ' -- ' + 'No info for ' + fsid) + return 0 + + def _get_evsips(self, cmd, ip0, user, pw, evsid): + """Gets the EVS IPs for the named filesystem.""" + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, ip0, + 'evsipaddr', '-e', evsid, + check_exit_code=True) + + iplist = "" + lines = out.split('\n') + for line in lines: + inf = line.split() + if 'evs' in line: + iplist += inf[3] + ' ' + + LOG.debug('get_evsips: ' + iplist) + return iplist + + def _get_fsid(self, cmd, ip0, user, pw, fslabel): + """Gets the FSID for the named filesystem.""" + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, 'evsfs', 'list', + check_exit_code=True) + LOG.debug('get_fsid: out ' + out) + + lines = out.split('\n') + for line in lines: + inf = line.split() + if fslabel in line and fslabel == inf[1]: + LOG.debug('get_fsid: ' + line) + return inf[0] + + LOG.warn('get_fsid: ' + out + ' -- ' + 'No infor for ' + fslabel) + return 0 + + def get_nfs_info(self, cmd, ip0, user, pw): + """Gets information on each NFS export. 
+ + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :returns: formated string + """ + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, ip0, + 'for-each-evs', '-q', + 'nfs-export', 'list', + check_exit_code=True) + + lines = out.split('\n') + newout = "" + export = "" + path = "" + for line in lines: + inf = line.split() + if 'Export name' in line: + export = inf[2] + if 'Export path' in line: + path = inf[2] + if 'File system info' in line: + fs = "" + if 'File system label' in line: + fs = inf[3] + if 'Transfer setting' in line and fs != "": + fsid = self._get_fsid(cmd, ip0, user, pw, fs) + evsid = self._get_evs(cmd, ip0, user, pw, fsid) + ips = self._get_evsips(cmd, ip0, user, pw, evsid) + newout += "Export: %s Path: %s HDP: %s FSID: %s \ + EVS: %s IPS: %s\n" \ + % (export, path, fs, fsid, evsid, ips) + fs = "" + + LOG.debug('get_nfs_info: ' + newout + ' -- ' + err) + return newout + + def create_lu(self, cmd, ip0, user, pw, hdp, size, name): + """Creates a new Logical Unit. + + If the operation can not be performed for some reason, utils.execute() + throws an error and aborts the operation. Used for iSCSI only + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param hdp: data Pool the logical unit will be created + :param size: Size (Mb) of the new logical unit + :param name: name of the logical unit + :returns: formated string with 'LUN %d HDP: %d size: %s MB, is + successfully created' + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-lu', 'add', "-e", + name, hdp, + '/.cinder/' + name + '.iscsi', + size + 'M', + check_exit_code=True) + + out = "LUN %s HDP: %s size: %s MB, is successfully created" \ + % (name, hdp, size) + + LOG.debug('create_lu: ' + out) + return out + + def delete_lu(self, cmd, ip0, user, pw, hdp, lun): + """Delete an logical unit. Used for iSCSI only + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param hdp: data Pool of the logical unit + :param lun: id of the logical unit being deleted + :returns: formated string 'Logical unit deleted successfully.' + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-lu', 'del', '-d', + '-f', lun, + check_exit_code=True) + + LOG.debug('delete_lu: ' + out + ' -- ' + err) + return out + + def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name): + """Clones a volume + + Clone primitive used to support all iSCSI snapshot/cloning functions. + Used for iSCSI only. + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param hdp: data Pool of the logical unit + :param src_lun: id of the logical unit being deleted + :param size: size of the LU being cloned. 
Only for logging purposes + :returns: formated string + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-lu', 'clone', '-e', + src_lun, name, + '/.cinder/' + name + '.iscsi', + check_exit_code=True) + + out = "LUN %s HDP: %s size: %s MB, is successfully created" \ + % (name, hdp, size) + + LOG.debug('create_dup: ' + out + ' -- ' + err) + return out + + def file_clone(self, cmd, ip0, user, pw, fslabel, src, name): + """Clones NFS files to a new one named 'name' + + Clone primitive used to support all NFS snapshot/cloning functions. + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param fslabel: file system label of the new file + :param src: source file + :param name: target path of the new created file + :returns: formated string + """ + + _fsid = self._get_fsid(cmd, ip0, user, pw, fslabel) + _evsid = self._get_evs(cmd, ip0, user, pw, _fsid) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'file-clone-create', '-f', fslabel, + src, name, + check_exit_code=True) + + out = "LUN %s HDP: %s Clone: %s -> %s" % (name, _fsid, src, name) + + LOG.debug('file_clone: ' + out + ' -- ' + err) + return out + + def extend_vol(self, cmd, ip0, user, pw, hdp, lun, new_size, name): + """Extend a iSCSI volume. + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param hdp: data Pool of the logical unit + :param lun: id of the logical unit being extended + :param new_size: new size of the LU + :param name: formated string + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-lu', 'expand', + name, new_size + 'M', + check_exit_code=True) + + out = ("LUN: %s successfully extended to %s MB" % (name, new_size)) + + LOG.debug('extend_vol: ' + out) + return out + + def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp, + port, iqn, initiator): + """Setup the lun on on the specified target port + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param lun: id of the logical unit being extended + :param hdp: data pool of the logical unit + :param port: iSCSI port + :param iqn: iSCSI qualified name + :param initiator: initiator address + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'list', iqn, + check_exit_code=True) + + # even though ssc uses the target alias, need to return the full iqn + fulliqn = "" + lines = out.split('\n') + for line in lines: + if 'Globally unique name' in line: + fulliqn = line.split()[3] + + # find first free hlun + hlun = 0 + for line in lines: + if line.startswith(' '): + lunline = line.split()[0] + vol = line.split()[1] + if lunline[0].isdigit(): + # see if already mounted + if vol[:29] == lun[:29]: + LOG.info('lun: %s already mounted %s' % (lun, lunline)) + conn = (int(lunline), lun, initiator, hlun, fulliqn, + hlun, hdp, port) + out = "H-LUN: %d alreadymapped LUN: %s, iSCSI \ + Initiator: %s @ index: %d, and Target: %s \ + @ index %d is successfully paired @ CTL: \ + 
%s, Port: %s" % conn + LOG.debug('add_iscsi_conn: returns ' + out) + return out + + if int(lunline) == hlun: + hlun += 1 + if int(lunline) > hlun: + # found a hole + break + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'addlu', + iqn, lun, hlun, + check_exit_code=True) + + conn = (int(hlun), lun, initiator, int(hlun), fulliqn, int(hlun), + hdp, port) + out = "H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s \ + @ index: %d, and Target: %s @ index %d is \ + successfully paired @ CTL: %s, Port: %s" % conn + + LOG.debug('add_iscsi_conn: returns ' + out) + return out + + def del_iscsi_conn(self, cmd, ip0, user, pw, evsid, iqn, hlun): + """Remove the lun on on the specified target port + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param evsid: EVSID for the file system + :param iqn: iSCSI qualified name + :param hlun: logical unit id + :return: formated string + """ + + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", evsid, + 'iscsi-target', 'list', iqn, + check_exit_code=True) + + lines = out.split('\n') + out = ("H-LUN: %d already deleted from target %s" % (int(hlun), iqn)) + # see if lun is already detached + for line in lines: + if line.startswith(' '): + lunline = line.split()[0] + if lunline[0].isdigit() and lunline == hlun: + out = "" + break + + if out != "": + # hlun wasn't found + LOG.info('del_iscsi_conn: hlun not found' + out) + return out + + # remove the LU from the target + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", evsid, + 'iscsi-target', 'dellu', + '-f', iqn, hlun, + check_exit_code=True) + + out = "H-LUN: %d successfully deleted from target %s" \ + % (int(hlun), iqn) + + LOG.debug('del_iscsi_conn: ' + out + ' -- ') + return out + + def get_targetiqn(self, cmd, ip0, user, pw, targetalias, hdp, secret): + """Obtain the targets full iqn + + Return the target's full iqn rather than its alias. + + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param targetalias: alias of the target + :param hdp: data pool of the logical unit + :param secret: CHAP secret of the target + :return: string with full IQN + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'list', targetalias, + check_exit_code=True) + + if "does not exist" in out: + if secret == "": + secret = '""' + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'add', + targetalias, secret, + check_exit_code=True) + else: + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'add', + targetalias, secret, + check_exit_code=True) + + lines = out.split('\n') + # returns the first iqn + for line in lines: + if 'Alias' in line: + fulliqn = line.split()[2] + return fulliqn + + def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret): + """Sets the chap secret for the specified target. 
+ :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param targetalias: alias of the target + :param hdp: data pool of the logical unit + :param secret: CHAP secret of the target + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'list', + targetalias, + check_exit_code=False) + + if "does not exist" in out: + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'add', + targetalias, secret, + check_exit_code=True) + else: + LOG.info('targetlist: ' + targetalias + ' -- ' + out) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'mod', + '-s', secret, '-a', 'enable', + targetalias, + check_exit_code=True) + + def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp): + """Returns the chap secret for the specified target. + :param ip0: string IP address of controller + :param user: string user authentication for array + :param pw: string password authentication for array + :param targetalias: alias of the target + :param hdp: data pool of the logical unit + :return secret: CHAP secret of the target + """ + + _evsid = self._get_evs(cmd, ip0, user, pw, hdp) + out, err = utils.execute(cmd, + '-u', user, '-p', pw, + ip0, "console-context", + "--evs", _evsid, + 'iscsi-target', 'list', targetalias, + check_exit_code=True) + + enabled = "" + secret = "" + lines = out.split('\n') + for line in lines: + if 'Secret' in line: + secret = line.split()[2] + if 'Authentication' in line: + enabled = line.split()[2] + + if enabled == 'Enabled': + return secret diff --git a/cinder/volume/drivers/hds/iscsi.py b/cinder/volume/drivers/hds/iscsi.py new file mode 100644 index 000000000..24248e3e9 --- /dev/null +++ b/cinder/volume/drivers/hds/iscsi.py @@ -0,0 +1,699 @@ +# Copyright (c) 2014 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform. 
+""" + +from oslo.config import cfg +from xml.etree import ElementTree as ETree + +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder import units +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.hds.hnas_backend import HnasBackend + + +HDS_HNAS_ISCSI_VERSION = '1.0.0' + +LOG = logging.getLogger(__name__) + +iSCSI_OPTS = [ + cfg.StrOpt('hds_hnas_iscsi_config_file', + default='/opt/hds/hnas/cinder_iscsi_conf.xml', + help='configuration file for HDS iSCSI cinder plugin')] + +CONF = cfg.CONF +CONF.register_opts(iSCSI_OPTS) + +HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'chap_enabled': 'True'} + + +def factory_bend(type): + return HnasBackend() + + +def _loc_info(loc): + """Parse info from location string.""" + + LOG.info("Parse_loc: %s" % loc) + info = {} + tup = loc.split(',') + if len(tup) < 5: + info['id_lu'] = tup[0].split('.') + return info + info['id_lu'] = tup[2].split('.') + info['tgt'] = tup + return info + + +def _xml_read(root, element, check=None): + """Read an xml element.""" + + try: + val = root.findtext(element) + LOG.info(_("%(element)s: %(val)s") + % {'element': element, + 'val': val}) + if val: + return val.strip() + if check: + raise exception.ParameterNotFound(param=element) + return None + except ETree.ParseError: + if check: + with excutils.save_and_reraise_exception(): + LOG.error(_("XML exception reading parameter: %s") % element) + else: + LOG.info(_("XML exception reading parameter: %s") % element) + return None + + +def _read_config(xml_config_file): + """Read hds driver specific xml config file.""" + + try: + root = ETree.parse(xml_config_file).getroot() + except Exception: + raise exception.NotFound(message='config file not found: ' + + xml_config_file) + + # mandatory parameters + config = {} + arg_prereqs = ['mgmt_ip0', 'username', 'password'] + for req in arg_prereqs: + config[req] = _xml_read(root, req, 'check') + + # optional parameters + for opt in ['hnas_cmd', 'chap_enabled']: + config[opt] = _xml_read(root, opt) or\ + HNAS_DEFAULT_CONFIG[opt] + + config['hdp'] = {} + config['services'] = {} + + # min one needed + for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: + if _xml_read(root, svc) is None: + continue + service = {'label': svc} + + # none optional + for arg in ['volume_type', 'hdp', 'iscsi_ip']: + service[arg] = _xml_read(root, svc + '/' + arg, 'check') + config['services'][service['volume_type']] = service + config['hdp'][service['hdp']] = service['hdp'] + + # at least one service required! 
+ if config['services'].keys() is None: + raise exception.ParameterNotFound(param="No service found") + + return config + + +class HDSISCSIDriver(driver.ISCSIDriver): + """HDS HNAS volume driver.""" + + def __init__(self, *args, **kwargs): + """Initialize, read different config parameters.""" + + super(HDSISCSIDriver, self).__init__(*args, **kwargs) + self.driver_stats = {} + self.context = {} + self.configuration.append_config_values(iSCSI_OPTS) + self.config = _read_config( + self.configuration.hds_hnas_iscsi_config_file) + self.type = 'HNAS' + + self.platform = self.type.lower() + LOG.info(_("Backend type: %s") % self.type) + self.bend = factory_bend(self.type) + + def _array_info_get(self): + """Get array parameters.""" + + out = self.bend.get_version(self.config['hnas_cmd'], + HDS_HNAS_ISCSI_VERSION, + self.config['mgmt_ip0'], + self.config['username'], + self.config['password']) + inf = out.split() + + return inf[1], 'hnas_' + inf[1], inf[6] + + def _get_iscsi_info(self): + """Validate array iscsi parameters.""" + + out = self.bend.get_iscsi_info(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password']) + lines = out.split('\n') + + # dict based on iSCSI portal ip addresses + conf = {} + for line in lines: + # only record up links + if 'CTL' in line and 'Up' in line: + inf = line.split() + (ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7]) + conf[ip] = {} + conf[ip]['ctl'] = ctl + conf[ip]['port'] = port + conf[ip]['iscsi_port'] = ipp + msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s') + LOG.debug(msg + % {'ip': ip, + 'ipp': ipp, + 'ctl': ctl, + 'port': port}) + + return conf + + def _get_service(self, volume): + """Get the available service parameters for a given volume using + its type. + :param volume: dictionary volume reference + """ + + label = None + if volume['volume_type']: + label = volume['volume_type']['name'] + + label = label or 'default' + if label not in self.config['services'].keys(): + # default works if no match is found + label = 'default' + LOG.info(_("Using default: instead of %s") % label) + LOG.info(_("Available services: %s") + % self.config['services'].keys()) + + if label in self.config['services'].keys(): + svc = self.config['services'][label] + # HNAS - one time lookup + # see if the client supports CHAP authentication and if + # iscsi_secret has already been set, retrieve the secret if + # available, otherwise generate and store + if self.config['chap_enabled'] == 'True': + # it may not exist, create and set secret + if 'iscsi_secret' not in svc: + LOG.info(_("Retrieving secret for service: %s") + % label) + + out = self.bend.get_targetsecret(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + 'cinder-' + label, + svc['hdp']) + svc['iscsi_secret'] = out + if svc['iscsi_secret'] == "": + svc['iscsi_secret'] = utils.generate_password()[0:15] + self.bend.set_targetsecret(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + svc['iscsi_target'], + svc['hdp'], + svc['iscsi_secret']) + + LOG.info("Set tgt CHAP secret for service: %s" + % (label)) + else: + # We set blank password when the client does not + # support CHAP. Later on, if the client tries to create a new + # target that does not exists in the backend, we check for this + # value and use a temporary dummy password. 
+ if 'iscsi_secret' not in svc: + # Warns in the first time + LOG.info("CHAP authentication disabled") + + svc['iscsi_secret'] = "" + + if 'iscsi_target' not in svc: + LOG.info(_("Retrieving target for service: %s") % label) + + out = self.bend.get_targetiqn(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + 'cinder-' + label, + svc['hdp'], + svc['iscsi_secret']) + svc['iscsi_target'] = out + + self.config['services'][label] = svc + + service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], + svc['port'], svc['hdp'], svc['iscsi_target'], + svc['iscsi_secret']) + else: + LOG.info(_("Available services: %s") + % self.config['services'].keys()) + LOG.error(_("No configuration found for service: %s") + % label) + raise exception.ParameterNotFound(param=label) + + return service + + def _get_stats(self): + """Get HDP stats from HNAS.""" + + total_cap = 0 + total_used = 0 + out = self.bend.get_hdp_info(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password']) + + for line in out.split('\n'): + if 'HDP' in line: + (hdp, size, _ign, used) = line.split()[1:5] # in MB + LOG.debug(_("stats: looking for: %s") % hdp) + if int(hdp) >= units.KiB: # HNAS fsid + hdp = line.split()[11] + if hdp in self.config['hdp'].keys(): + total_cap += int(size) + total_used += int(used) + + LOG.info("stats: total: %d used: %d" % (total_cap, total_used)) + + hnas_stat = {} + hnas_stat['total_capacity_gb'] = int(total_cap / units.KiB) # in GB + hnas_stat['free_capacity_gb'] = \ + int((total_cap - total_used) / units.KiB) + be_name = self.configuration.safe_get('volume_backend_name') + hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver' + hnas_stat["vendor_name"] = 'HDS' + hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION + hnas_stat["storage_protocol"] = 'iSCSI' + hnas_stat['QoS_support'] = False + hnas_stat['reserved_percentage'] = 0 + + LOG.info(_("stats: stats: %s") % hnas_stat) + return hnas_stat + + def _get_hdp_list(self): + """Get HDPs from HNAS.""" + + out = self.bend.get_hdp_info(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password']) + + hdp_list = [] + for line in out.split('\n'): + if 'HDP' in line: + inf = line.split() + if int(inf[1]) >= units.KiB: + # HDP fsids start at units.KiB (1024) + hdp_list.append(inf[11]) + else: + # HDP pools are 2-digits max + hdp_list.extend(inf[1:2]) + + # returns a list of HDP IDs + LOG.info(_("HDP list: %s") % hdp_list) + return hdp_list + + def _check_hdp_list(self): + """Verify HDPs in HNAS array. + + Verify that all HDPs specified in the configuration files actually + exists on the storage. + """ + + hdpl = self._get_hdp_list() + lst = self.config['hdp'].keys() + + for hdp in lst: + if hdp not in hdpl: + LOG.error(_("HDP not found: %s") % hdp) + err = "HDP not found: " + hdp + raise exception.ParameterNotFound(param=err) + # status, verify corresponding status is Normal + + def _id_to_vol(self, volume_id): + """Given the volume id, retrieve the volume object from database. + :param volume_id: volume id string + """ + + vol = self.db.volume_get(self.context, volume_id) + + return vol + + def _update_vol_location(self, volume_id, loc): + """Update the provider location. 
+ :param volume_id: volume id string + :param loc: string provider location value + """ + + update = {'provider_location': loc} + self.db.volume_update(self.context, volume_id, update) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + + pass + + def do_setup(self, context): + """Setup and verify HDS HNAS storage connection.""" + + self.context = context + (self.arid, self.hnas_name, self.lumax) = self._array_info_get() + self._check_hdp_list() + + iscsi_info = self._get_iscsi_info() + LOG.info(_("do_setup: %s") % iscsi_info) + for svc in self.config['services'].keys(): + svc_ip = self.config['services'][svc]['iscsi_ip'] + if svc_ip in iscsi_info.keys(): + LOG.info(_("iSCSI portal found for service: %s") % svc_ip) + self.config['services'][svc]['port'] = \ + iscsi_info[svc_ip]['port'] + self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl'] + self.config['services'][svc]['iscsi_port'] = \ + iscsi_info[svc_ip]['iscsi_port'] + else: # config iscsi address not found on device! + LOG.error(_("iSCSI portal not found for service: %s") % svc_ip) + raise exception.ParameterNotFound(param=svc_ip) + + def ensure_export(self, context, volume): + pass + + def create_export(self, context, volume): + """Create an export. Moved to initialize_connection. + :param context: + :param volume: volume reference + """ + + name = volume['name'] + LOG.debug(_("create_export %(name)s") % {'name': name}) + + pass + + def remove_export(self, context, volume): + """Disconnect a volume from an attached instance. + :param context: context + :param volume: dictionary volume referencej + """ + + provider = volume['provider_location'] + name = volume['name'] + LOG.debug(_("remove_export provider %(provider)s on %(name)s") + % {'provider': provider, + 'name': name}) + + pass + + def create_volume(self, volume): + """Create a LU on HNAS. + :param volume: ditctionary volume reference + """ + + service = self._get_service(volume) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + out = self.bend.create_lu(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + hdp, + '%s' % (int(volume['size']) * units.KiB), + volume['name']) + + LOG.info(_("create_volume: create_lu returns %s") % out) + + lun = self.arid + '.' + out.split()[1] + sz = int(out.split()[5]) + + # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd + LOG.info(_("LUN %(lun)s of size %(sz)s MB is created.") + % {'lun': lun, 'sz': sz}) + return {'provider_location': lun} + + def create_cloned_volume(self, dst, src): + """Create a clone of a volume. + :param dst: ditctionary destination volume reference + :param src: ditctionary source volume reference + """ + + if src['size'] != dst['size']: + msg = 'clone volume size mismatch' + raise exception.VolumeBackendAPIException(data=msg) + service = self._get_service(dst) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + size = int(src['size']) * units.KiB + source_vol = self._id_to_vol(src['id']) + (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] + out = self.bend.create_dup(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + slun, hdp, '%s' % size, + dst['name']) + + lun = self.arid + '.' 
+ out.split()[1] + size = int(out.split()[5]) + + LOG.debug(_("LUN %(lun)s of size %(size)s MB is cloned.") + % {'lun': lun, + 'size': size}) + return {'provider_location': lun} + + def extend_volume(self, volume, new_size): + """Extend an existing volume. + + :param volume: dictionary volume reference + :param new_size: int size in GB to extend + """ + + service = self._get_service(volume) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + (arid, lun) = _loc_info(volume['provider_location'])['id_lu'] + self.bend.extend_vol(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + hdp, lun, + '%s' % (new_size * units.KiB), + volume['name']) + + LOG.info(_("LUN %(lun)s extended to %(size)s GB.") + % {'lun': lun, 'size': new_size}) + + def delete_volume(self, volume): + """Delete an LU on HNAS. + :param volume: dictionary volume reference + """ + + prov_loc = volume['provider_location'] + if prov_loc is None: + LOG.error("delete_vol: provider location empty.") + return + info = _loc_info(prov_loc) + (arid, lun) = info['id_lu'] + if 'tgt' in info.keys(): # connected? + LOG.info("delete lun loc %s" % info['tgt']) + # loc = id.lun + (_portal, iqn, loc, ctl, port, hlun) = info['tgt'] + self.bend.del_iscsi_conn(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + ctl, iqn, hlun) + + name = self.hnas_name + + LOG.debug(_("delete lun %(lun)s on %(name)s") + % {'lun': lun, + 'name': name}) + + service = self._get_service(volume) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + self.bend.delete_lu(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + hdp, lun) + + def initialize_connection(self, volume, connector): + """Map the created volume to connector['initiator']. + + :param volume: dictionary volume reference + :param connector: dictionary connector reference + """ + + LOG.info("initialize volume %s connector %s" % (volume, connector)) + + # connector[ip, host, wwnns, unititator, wwp/ + service = self._get_service(volume) + (ip, ipp, ctl, port, _hdp, target, secret) = service + info = _loc_info(volume['provider_location']) + + if 'tgt' in info.keys(): # spurious repeat connection + # print info.keys() + LOG.debug("initiate_conn: tgt already set %s" % info['tgt']) + (arid, lun) = info['id_lu'] + loc = arid + '.' 
+ lun + # sps, use target if provided + iqn = target + out = self.bend.add_iscsi_conn(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + lun, _hdp, port, iqn, + connector['initiator']) + + hnas_portal = ip + ':' + ipp + # sps need hlun, fulliqn + hlun = out.split()[1] + fulliqn = out.split()[13] + tgt = hnas_portal + ',' + iqn + ',' + loc + ',' + ctl + ',' + tgt += port + ',' + hlun + + LOG.info("initiate: connection %s" % tgt) + + properties = {} + properties['provider_location'] = tgt + self._update_vol_location(volume['id'], tgt) + properties['target_discovered'] = False + properties['target_portal'] = hnas_portal + properties['target_iqn'] = fulliqn + properties['target_lun'] = hlun + properties['volume_id'] = volume['id'] + properties['auth_username'] = connector['initiator'] + + if self.config['chap_enabled'] == 'True': + properties['auth_method'] = 'CHAP' + properties['auth_password'] = secret + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate a connection to a volume. + + :param volume: dictionary volume reference + :param connector: dictionary connector reference + """ + + info = _loc_info(volume['provider_location']) + if 'tgt' not in info.keys(): # spurious disconnection + LOG.warn("terminate_conn: provider location empty.") + return + (arid, lun) = info['id_lu'] + (_portal, iqn, loc, ctl, port, hlun) = info['tgt'] + LOG.info("terminate: connection %s" % volume['provider_location']) + self.bend.del_iscsi_conn(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + ctl, iqn, hlun) + self._update_vol_location(volume['id'], loc) + + return {'provider_location': loc} + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot. + + :param volume: dictionary volume reference + :param snapshot: dictionary snapshot reference + """ + + size = int(snapshot['volume_size']) * units.KiB + (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu'] + service = self._get_service(volume) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + out = self.bend.create_dup(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + slun, hdp, '%s' % (size), + volume['name']) + lun = self.arid + '.' + out.split()[1] + sz = int(out.split()[5]) + + LOG.debug(_("LUN %(lun)s of size %(sz)s MB is created from snapshot.") + % {'lun': lun, 'sz': sz}) + return {'provider_location': lun} + + def create_snapshot(self, snapshot): + """Create a snapshot. + :param snapshot: dictionary snapshot reference + """ + + source_vol = self._id_to_vol(snapshot['volume_id']) + service = self._get_service(source_vol) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + size = int(snapshot['volume_size']) * units.KiB + (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] + out = self.bend.create_dup(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + slun, hdp, + '%s' % (size), + snapshot['name']) + lun = self.arid + '.' + out.split()[1] + size = int(out.split()[5]) + + LOG.debug(_("LUN %(lun)s of size %(size)s MB is created.") + % {'lun': lun, 'size': size}) + return {'provider_location': lun} + + def delete_snapshot(self, snapshot): + """Delete a snapshot. 
+ + :param snapshot: dictionary snapshot reference + """ + + loc = snapshot['provider_location'] + + # to take care of spurious input + if loc is None: + # which could cause exception. + return + + (arid, lun) = loc.split('.') + source_vol = self._id_to_vol(snapshot['volume_id']) + service = self._get_service(source_vol) + (_ip, _ipp, _ctl, _port, hdp, target, secret) = service + myid = self.arid + + if arid != myid: + LOG.error(_('Array mismatch %(myid)s vs %(arid)s') + % {'myid': myid, + 'arid': arid}) + msg = 'Array id mismatch in delete snapshot' + raise exception.VolumeBackendAPIException(data=msg) + self.bend.delete_lu(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + hdp, lun) + + LOG.debug(_("LUN %s is deleted.") % lun) + return + + def get_volume_stats(self, refresh=False): + """Get volume stats. If 'refresh', run update the stats first.""" + + if refresh: + self.driver_stats = self._get_stats() + + return self.driver_stats diff --git a/cinder/volume/drivers/hds/nfs.py b/cinder/volume/drivers/hds/nfs.py new file mode 100644 index 000000000..b20a3ad76 --- /dev/null +++ b/cinder/volume/drivers/hds/nfs.py @@ -0,0 +1,494 @@ +# Copyright (c) 2014 Hitachi Data Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Volume driver for HDS HNAS NFS storage. +""" + +import os +import time + +from oslo.config import cfg +from xml.etree import ElementTree as ETree + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import units +from cinder.volume.drivers.hds.hnas_backend import HnasBackend +from cinder.volume.drivers import nfs + + +HDS_HNAS_NFS_VERSION = '1.0.0' + +LOG = logging.getLogger(__name__) + +NFS_OPTS = [ + cfg.StrOpt('hds_hnas_nfs_config_file', + default='/opt/hds/hnas/cinder_nfs_conf.xml', + help='configuration file for HDS NFS cinder plugin'), ] + +CONF = cfg.CONF +CONF.register_opts(NFS_OPTS) + +HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc'} + + +def _xml_read(root, element, check=None): + """Read an xml element. + + :param root: XML object + :param element: string desired tag + :param check: string if present, throw exception if element missing + """ + + try: + val = root.findtext(element) + LOG.info(_("%(element)s: %(val)s") + % {'element': element, + 'val': val}) + if val: + return val.strip() + if check: + raise exception.ParameterNotFound(param=element) + return None + except ETree.ParseError: + if check: + with excutils.save_and_reraise_exception(): + LOG.error(_("XML exception reading parameter: %s") % element) + else: + LOG.info(_("XML exception reading parameter: %s") % element) + return None + + +def _read_config(xml_config_file): + """Read hds driver specific xml config file. 
+ + :param xml_config_file: string filename containing XML configuration + """ + + try: + root = ETree.parse(xml_config_file).getroot() + except Exception: + raise exception.NotFound(message='config file not found: ' + + xml_config_file) + + # mandatory parameters + config = {} + arg_prereqs = ['mgmt_ip0', 'username', 'password'] + for req in arg_prereqs: + config[req] = _xml_read(root, req, 'check') + + # optional parameters + config['hnas_cmd'] = _xml_read(root, 'hnas_cmd') or\ + HNAS_DEFAULT_CONFIG['hnas_cmd'] + + config['hdp'] = {} + config['services'] = {} + + # min one needed + for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: + if _xml_read(root, svc) is None: + continue + service = {'label': svc} + + # none optional + for arg in ['volume_type', 'hdp']: + service[arg] = _xml_read(root, svc + '/' + arg, 'check') + config['services'][service['volume_type']] = service + config['hdp'][service['hdp']] = service['hdp'] + + # at least one service required! + if config['services'].keys() is None: + raise exception.ParameterNotFound(param="No service found") + + return config + + +def factory_bend(): + """Factory over-ride in self-tests.""" + + return HnasBackend() + + +class HDSNFSDriver(nfs.NfsDriver): + """Base class for Hitachi NFS driver. + Executes commands relating to Volumes. + """ + + def __init__(self, *args, **kwargs): + # NOTE(vish): db is set by Manager + self._execute = None + self.context = None + self.configuration = kwargs.get('configuration', None) + + if self.configuration: + self.configuration.append_config_values(NFS_OPTS) + self.config = _read_config( + self.configuration.hds_hnas_nfs_config_file) + + super(HDSNFSDriver, self).__init__(*args, **kwargs) + self.bend = factory_bend() + (self.arid, self.nfs_name, self.lumax) = self._array_info_get() + + def _array_info_get(self): + """Get array parameters.""" + + out = self.bend.get_version(self.config['hnas_cmd'], + HDS_HNAS_NFS_VERSION, + self.config['mgmt_ip0'], + self.config['username'], + self.config['password']) + + inf = out.split() + return inf[1], 'nfs_' + inf[1], inf[6] + + def _id_to_vol(self, volume_id): + """Given the volume id, retrieve the volume object from database. + + :param volume_id: string volume id + """ + + vol = self.db.volume_get(self.context, volume_id) + + return vol + + def _get_service(self, volume): + """Get the available service parameters for a given volume using + its type. + + :param volume: dictionary volume reference + """ + + label = None + if volume['volume_type']: + label = volume['volume_type']['name'] + label = label or 'default' + if label not in self.config['services'].keys(): + # default works if no match is found + label = 'default' + if label in self.config['services'].keys(): + svc = self.config['services'][label] + LOG.info("Get service: %s->%s" % (label, svc['fslabel'])) + service = (svc['hdp'], svc['path'], svc['fslabel']) + else: + LOG.info(_("Available services: %s") + % self.config['services'].keys()) + LOG.error(_("No configuration found for service: %s") % label) + raise exception.ParameterNotFound(param=label) + + return service + + def set_execute(self, execute): + self._execute = execute + + def extend_volume(self, volume, new_size): + """Extend an existing volume. + + :param volume: dictionary volume reference + :param new_size: int size in GB to extend + """ + + nfs_mount = self._get_provider_location(volume['id']) + path = self._get_volume_path(nfs_mount, volume['name']) + + # Resize the image file on share to new size. 
+ LOG.debug(_('Checking file for resize')) + + if self._is_file_size_equal(path, new_size): + return + else: + LOG.info(_('Resizing file to %sG'), new_size) + image_utils.resize_image(path, new_size) + if self._is_file_size_equal(path, new_size): + LOG.info(_("LUN %(id)s extended to %(size)s GB.") + % {'id': volume['id'], 'size': new_size}) + return + else: + raise exception.InvalidResults( + _('Resizing image file failed.')) + + def _is_file_size_equal(self, path, size): + """Checks if file size at path is equal to size.""" + + data = image_utils.qemu_img_info(path) + virt_size = data.virtual_size / units.GiB + + if virt_size == size: + return True + else: + return False + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + + LOG.debug(_('create_volume_from %s') % volume) + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + + if vol_size != snap_size: + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} + raise exception.CinderException(msg % msg_fmt) + + self._clone_volume(snapshot['name'], + volume['name'], + snapshot['volume_id']) + share = self._get_volume_location(snapshot['volume_id']) + + return {'provider_location': share} + + def create_snapshot(self, snapshot): + """Create a snapshot. + + :param snapshot: dictionary snapshot reference + """ + + self._clone_volume(snapshot['volume_name'], + snapshot['name'], + snapshot['volume_id']) + share = self._get_volume_location(snapshot['volume_id']) + LOG.debug(_('Share: %s'), share) + + # returns the mount point (not path) + return {'provider_location': share} + + def delete_snapshot(self, snapshot): + """Deletes a snapshot. + + :param snapshot: dictionary snapshot reference + """ + + nfs_mount = self._get_provider_location(snapshot['volume_id']) + + if self._volume_not_present(nfs_mount, snapshot['name']): + return True + + self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']), + run_as_root=True) + + def _get_volume_location(self, volume_id): + """Returns NFS mount address as :. + + :param volume_id: string volume id + """ + + nfs_server_ip = self._get_host_ip(volume_id) + export_path = self._get_export_path(volume_id) + + return nfs_server_ip + ':' + export_path + + def _get_provider_location(self, volume_id): + """Returns provider location for given volume. + + :param volume_id: string volume id + """ + + volume = self.db.volume_get(self.context, volume_id) + + # same format as _get_volume_location + return volume.provider_location + + def _get_host_ip(self, volume_id): + """Returns IP address for the given volume. + + :param volume_id: string volume id + """ + + return self._get_provider_location(volume_id).split(':')[0] + + def _get_export_path(self, volume_id): + """Returns NFS export path for the given volume. + + :param volume_id: string volume id + """ + + return self._get_provider_location(volume_id).split(':')[1] + + def _volume_not_present(self, nfs_mount, volume_name): + """Check if volume exists. + + :param volume_name: string volume name + """ + + try: + self._try_execute('ls', self._get_volume_path(nfs_mount, + volume_name)) + except processutils.ProcessExecutionError: + # If the volume isn't present + return True + + return False + + def _try_execute(self, *command, **kwargs): + # NOTE(vish): Volume commands can partially fail due to timing, but + # running them a second time on failure will usually + # recover nicely. 
+ tries = 0 + while True: + try: + self._execute(*command, **kwargs) + return True + except processutils.ProcessExecutionError: + tries += 1 + if tries >= self.configuration.num_shell_tries: + raise + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) + time.sleep(tries ** 2) + + def _get_volume_path(self, nfs_share, volume_name): + """Get volume path (local fs path) for given volume name on given nfs + share. + + :param nfs_share string, example 172.18.194.100:/var/nfs + :param volume_name string, + example volume-91ee65ec-c473-4391-8c09-162b00c68a8c + """ + + return os.path.join(self._get_mount_point_for_share(nfs_share), + volume_name) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume. + + :param volume: dictionary volume reference + :param src_vref: dictionary src_vref reference + """ + + vol_size = volume['size'] + src_vol_size = src_vref['size'] + + if vol_size != src_vol_size: + msg = _('Cannot create clone of size %(vol_size)s from ' + 'volume of size %(src_vol_size)s') + msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} + raise exception.CinderException(msg % msg_fmt) + + self._clone_volume(src_vref['name'], volume['name'], src_vref['id']) + share = self._get_volume_location(src_vref['id']) + + return {'provider_location': share} + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + if 'refresh' is True, update the stats first. + """ + + _stats = super(HDSNFSDriver, self).get_volume_stats(refresh) + be_name = self.configuration.safe_get('volume_backend_name') + _stats["volume_backend_name"] = be_name or 'HDSNFSDriver' + _stats["vendor_name"] = 'HDS' + _stats["driver_version"] = HDS_HNAS_NFS_VERSION + _stats["storage_protocol"] = 'NFS' + + return _stats + + def _get_nfs_info(self): + out = self.bend.get_nfs_info(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password']) + lines = out.split('\n') + + # dict based on NFS exports addresses + conf = {} + for line in lines: + if 'Export' in line: + inf = line.split() + (export, path, fslabel, hdp, evs, ip1) = \ + inf[1], inf[3], inf[5], inf[7], inf[9], inf[11] + # 9, 10, etc are IP addrs + key = ip1 + ':' + export + conf[key] = {} + conf[key]['path'] = path + conf[key]['hdp'] = hdp + conf[key]['fslabel'] = fslabel + msg = _('nfs_info: %(key)s: %(path)s, HDP: \ + %(fslabel)s FSID: %(hdp)s') + LOG.info(msg + % {'key': key, + 'path': path, + 'fslabel': fslabel, + 'hdp': hdp}) + + return conf + + def do_setup(self, context): + """Perform internal driver setup.""" + + self.context = context + self._load_shares_config(getattr(self.configuration, + self.driver_prefix + + '_shares_config')) + LOG.info("Review shares: %s" % self.shares) + + nfs_info = self._get_nfs_info() + + for share in self.shares: + #export = share.split(':')[1] + if share in nfs_info.keys(): + LOG.info("share: %s -> %s" % (share, nfs_info[share]['path'])) + + for svc in self.config['services'].keys(): + if share == self.config['services'][svc]['hdp']: + self.config['services'][svc]['path'] = \ + nfs_info[share]['path'] + # don't overwrite HDP value + self.config['services'][svc]['fsid'] = \ + nfs_info[share]['hdp'] + self.config['services'][svc]['fslabel'] = \ + nfs_info[share]['fslabel'] + LOG.info("Save service info for %s -> %s, %s" + % (svc, nfs_info[share]['hdp'], + nfs_info[share]['path'])) + break + if share != self.config['services'][svc]['hdp']: + LOG.error("NFS share %s has no service entry: %s -> %s" + % 
(share, svc, + self.config['services'][svc]['hdp'])) + raise exception.ParameterNotFound(param=svc) + else: + LOG.info("share: %s incorrect entry" % share) + + def _clone_volume(self, volume_name, clone_name, volume_id): + """Clones mounted volume using the HNAS file_clone. + + :param volume_name: string volume name + :param clone_name: string clone name (or snapshot) + :param volume_id: string volume id + """ + + export_path = self._get_export_path(volume_id) + # volume-ID snapshot-ID, /cinder + LOG.info("Cloning with volume_name %s clone_name %s export_path %s" + % (volume_name, clone_name, export_path)) + + source_vol = self._id_to_vol(volume_id) + # sps; added target + (_hdp, _path, _fslabel) = self._get_service(source_vol) + target_path = '%s/%s' % (_path, clone_name) + source_path = '%s/%s' % (_path, volume_name) + out = self.bend.file_clone(self.config['hnas_cmd'], + self.config['mgmt_ip0'], + self.config['username'], + self.config['password'], + _fslabel, source_path, target_path) + + return out diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample index 4784a5696..599d0f075 100644 --- a/etc/cinder/cinder.conf.sample +++ b/etc/cinder/cinder.conf.sample @@ -1126,6 +1126,23 @@ #hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml +# +# Options defined in cinder.volume.drivers.hds.iscsi +# + +# configuration file for HDS iSCSI cinder plugin (string +# value) +#hds_hnas_iscsi_config_file=/opt/hds/hnas/cinder_iscsi_conf.xml + + +# +# Options defined in cinder.volume.drivers.hds.nfs +# + +# configuration file for HDS NFS cinder plugin (string value) +#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml + + # # Options defined in cinder.volume.drivers.huawei # diff --git a/etc/cinder/rootwrap.d/volume.filters b/etc/cinder/rootwrap.d/volume.filters index 5b574040a..8cab777a1 100644 --- a/etc/cinder/rootwrap.d/volume.filters +++ b/etc/cinder/rootwrap.d/volume.filters @@ -79,6 +79,9 @@ chgrp: CommandFilter, chgrp, root hus-cmd: CommandFilter, hus-cmd, root hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root +# cinder/volumes/drivers/hds/hnas_backend.py +ssc: CommandFilter, ssc, root + # cinder/brick/initiator/connector.py: ls: CommandFilter, ls, root tee: CommandFilter, tee, root -- 2.45.2
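
For reference, below is a minimal, hypothetical example of the XML file that _read_config() in cinder/volume/drivers/hds/nfs.py expects at the path given by the hds_hnas_nfs_config_file option (default /opt/hds/hnas/cinder_nfs_conf.xml). Only the element names are taken from the parser in this patch: mgmt_ip0, username and password are mandatory, hnas_cmd is optional (falling back to the 'ssc' default), and at least one svc_0..svc_3 block with volume_type and hdp is required. Every value shown, including the root tag name (the parser only calls getroot() and findtext(), so the root tag is not significant), the addresses and the credentials, is a made-up placeholder, not a value from this patch.

<?xml version="1.0" encoding="UTF-8"?>
<!-- Hypothetical cinder_nfs_conf.xml; all values are illustrative placeholders. -->
<config>
  <!-- optional; _read_config() falls back to 'ssc' when this is omitted -->
  <hnas_cmd>ssc</hnas_cmd>
  <!-- mandatory SMU/management address and credentials -->
  <mgmt_ip0>10.0.0.10</mgmt_ip0>
  <username>admin</username>
  <password>secret</password>
  <!-- at least one service block; volume_type is matched against the Cinder
       volume type name in _get_service(), with 'default' as the fallback -->
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>10.0.0.20:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>10.0.0.21:/cinder</hdp>
  </svc_1>
</config>

Note that for the NFS driver the hdp element is matched in do_setup() against the shares listed in the driver's shares config (keys built by _get_nfs_info() as <evs-ip>:<export>), so it should carry the NFS export address rather than a bare file-system label.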