From 0c33d1443447e76481fdcc19af1b000d60219d10 Mon Sep 17 00:00:00 2001 From: Xing Yang Date: Fri, 14 Feb 2014 11:47:33 -0500 Subject: [PATCH] Add EMC VNX Direct Driver in Cinder This patch implements a driver based on the Cinder iSCSIDrver. It performs volume operations on VNX using the NaviSecCLI command line tool. It supports all required driver features. Implements blueprint emc-vnx-direct-driver Change-Id: Iec1786612cc19452aca806b41c6be664680a923b --- cinder/tests/test_emc_vnxdirect.py | 593 ++++++++++++++++ cinder/volume/drivers/emc/emc_cli_iscsi.py | 257 +++++++ cinder/volume/drivers/emc/emc_vnx_cli.py | 746 +++++++++++++++++++++ etc/cinder/cinder.conf.sample | 19 + 4 files changed, 1615 insertions(+) create mode 100644 cinder/tests/test_emc_vnxdirect.py create mode 100644 cinder/volume/drivers/emc/emc_cli_iscsi.py create mode 100644 cinder/volume/drivers/emc/emc_vnx_cli.py diff --git a/cinder/tests/test_emc_vnxdirect.py b/cinder/tests/test_emc_vnxdirect.py new file mode 100644 index 000000000..f1a62ef30 --- /dev/null +++ b/cinder/tests/test_emc_vnxdirect.py @@ -0,0 +1,593 @@ +# Copyright (c) 2012 - 2014 EMC Corporation, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import os + +import mock + +from cinder import exception +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver +from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCli +from cinder.volume import volume_types + + +class EMCVNXCLIDriverTestData(): + + test_volume = { + 'name': 'vol1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '1', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'test volume', + 'volume_type_id': None} + test_volfromsnap = { + 'name': 'volfromsnap', + 'size': 1, + 'volume_name': 'volfromsnap', + 'id': '10', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'volfromsnap', + 'display_description': 'test volume', + 'volume_type_id': None} + test_volfromsnap_e = { + 'name': 'volfromsnap_e', + 'size': 1, + 'volume_name': 'volfromsnap_e', + 'id': '20', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'volfromsnap_e', + 'display_description': 'test volume', + 'volume_type_id': None} + test_failed_volume = { + 'name': 'failed_vol1', + 'size': 1, + 'volume_name': 'failed_vol1', + 'id': '4', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'failed_vol', + 'display_description': 'test failed volume', + 'volume_type_id': None} + test_snapshot = { + 'name': 'snapshot1', + 'size': 1, + 'id': '4444', + 'volume_name': 'vol-vol1', + 'volume_size': 1, + 'project_id': 'project'} + test_failed_snapshot = { + 'name': 'failed_snapshot', + 'size': 1, + 'id': '5555', + 'volume_name': 'vol-vol1', + 'volume_size': 1, + 'project_id': 'project'} + test_clone = { + 'name': 'clone1', + 'size': 1, + 'id': '20', + 'volume_name': 'clone1', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone1', + 'display_description': 'volume created from snapshot', + 'volume_type_id': None} + test_clone_e = { + 'name': 'clone1_e', + 'size': 1, + 'id': '28', 
+ 'volume_name': 'clone1_e', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone1_e', + 'display_description': 'volume created from snapshot', + 'volume_type_id': None} + test_clone_src = { + 'name': 'clone1src', + 'size': 1, + 'id': '22', + 'volume_name': 'clone1src', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone1src', + 'display_description': 'volume created from snapshot', + 'volume_type_id': None} + connector = { + 'ip': '10.0.0.2', + 'initiator': 'iqn.1993-08.org.debian:01:222', + 'wwpns': ["123456789012345", "123456789054321"], + 'wwnns': ["223456789012345", "223456789054321"], + 'host': 'fakehost'} + + +class EMCVNXCLIDriverISCSITestCase(test.TestCase): + + def _fake_cli_executor(self, *cmd, **kwargv): + # mock cli + if cmd == ("storagepool", "-list", + "-name", "unit_test_pool", "-state"): + return None, 0 + elif cmd == ('storagepool', '-list', + '-name', 'unit_test_pool', '-userCap', '-availableCap'): + pool_details = "test\ntest\ntest\ntotal capacity:10000\n" + \ + "test\nfree capacity:1000\ntest\ntest" + return pool_details, 0 + elif cmd == ('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', 'vol1'): + return None, 0 + elif cmd == ('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', 'failed_vol1'): + return None, 1023 + elif cmd == ('lun', '-create', '-type', 'Thin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', 'vol1'): + return None, 0 + elif cmd == ('lun', '-list', '-name', 'vol1'): + return " 10\nReady", 0 + elif cmd == ('lun', '-destroy', '-name', 'vol1', + '-forceDetach', '-o'): + return "Lun deleted successfully", 0 + elif cmd == ('lun', '-destroy', '-name', 'failed_vol1', + '-forceDetach', '-o'): + return "Lun deleted successfully", 1023 + elif cmd == ('lun', '-list', '-name', 'vol-vol1'): + return " 16\n", 0 + elif cmd == ('snap', '-create', 
'-res', '16', '-name', + 'snapshot1', '-allowReadWrite', 'yes'): + return "Create Snap successfully", 0 + elif cmd == ('snap', '-create', '-res', '16', '-name', + 'failed_snapshot', '-allowReadWrite', 'yes'): + return "Create Snap failed", 1023 + elif cmd == ('snap', '-destroy', '-id', 'snapshot1', '-o'): + return "Delete Snap successfully", 0 + elif cmd == ('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', + 'volfromsnapdest'): + return "create temp volume successfully", 0 + elif cmd == ('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', + 'volfromsnap_edest'): + return "create temp volume successfully", 0 + elif cmd == ('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'vol-vol1', '-name', 'volfromsnap'): + return "create mount point successfully", 0 + elif cmd == ('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'vol-vol1', '-name', 'volfromsnap_e'): + return "create mount point successfully", 0 + elif cmd == ('lun', '-attach', '-name', 'volfromsnap', + '-snapName', 'snapshot1'): + return None, 0 + elif cmd == ('lun', '-attach', '-name', 'volfromsnap_e', + '-snapName', 'snapshot1'): + return None, 0 + elif cmd == ('lun', '-list', '-name', 'volfromsnap'): + return " 10\n", 0 + elif cmd == ('lun', '-list', '-name', 'volfromsnapdest'): + return " 101\n", 0 + elif cmd == ('lun', '-list', '-name', 'volfromsnap_e'): + return " 20\n", 0 + elif cmd == ('lun', '-list', '-name', 'volfromsnap_edest'): + return " 201\n", 0 + elif cmd == ('migrate', '-start', '-source', '10', '-dest', '101', + '-rate', 'ASAP', '-o'): + return None, 0 + elif cmd == ('migrate', '-start', '-source', '20', '-dest', '201', + '-rate', 'ASAP', '-o'): + return None, 0 + elif cmd == ('lun', '-list', '-name', 'volfromsnap', + '-attachedSnapshot'): + return "\n test \n :N/A", 0 + elif cmd == ('lun', '-list', '-name', 'volfromsnap_e', + '-attachedSnapshot'): + return "\n 
test \n :N", 0 + elif cmd == ('snap', '-create', '-res', '22', '-name', + 'clone1src-temp-snapshot', '-allowReadWrite', 'yes'): + return "Create Snap successfully", 0 + elif cmd == ('lun', '-list', '-name', 'clone1src'): + return " 22\n", 0 + elif cmd == ('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', 'clone1dest'): + return "create temp volume successfully", 0 + elif cmd == ('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'clone1src', '-name', 'clone1'): + return "create mount point successfully", 0 + elif cmd == ('lun', '-attach', '-name', 'clone1', + '-snapName', 'clone1src-temp-snapshot'): + return 'create temp snap successfully', 0 + elif cmd == ('lun', '-list', '-name', 'clone1'): + return " 30\n", 0 + elif cmd == ('lun', '-list', '-name', 'clone1dest'): + return " 301\n", 0 + elif cmd == ('migrate', '-start', '-source', '30', '-dest', '301', + '-rate', 'ASAP', '-o'): + return None, 0 + elif cmd == ('lun', '-list', '-name', 'clone1', + '-attachedSnapshot'): + return "\n test \n :N/A", 0 + elif cmd == ('snap', '-destroy', '-id', + 'clone1src-temp-snapshot', '-o'): + return None, 0 + elif cmd == ('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', + '-poolName', 'unit_test_pool', '-name', 'clone1_edest'): + return "create temp volume successfully", 0 + elif cmd == ('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'clone1src', '-name', 'clone1_e'): + return "create mount point successfully", 0 + elif cmd == ('lun', '-attach', '-name', 'clone1_e', '-snapName', + 'clone1src-temp-snapshot'): + return None, 0 + elif cmd == ('lun', '-list', '-name', 'clone1_e'): + return " 40\n", 0 + elif cmd == ('lun', '-list', '-name', 'clone1_edest'): + return " 401\n", 0 + elif cmd == ('migrate', '-start', '-source', '40', '-dest', '401', + '-rate', 'ASAP', '-o'): + return None, 0 + elif cmd == ('lun', '-list', '-name', 'clone1_e', + '-attachedSnapshot'): + return "\n test \n :N", 0 + 
elif cmd == ('lun', '-expand', '-name', 'vol1', + '-capacity', 2, '-sq', 'gb', '-o', + '-ignoreThresholds'): + return "Expand volume successfully", 0 + elif cmd == ('lun', '-expand', '-name', 'failed_vol1', + '-capacity', 2, '-sq', 'gb', '-o', + '-ignoreThresholds'): + return "Expand volume failed because it has snap", 97 + elif cmd == ('lun', '-expand', '-name', 'failed_vol1', + '-capacity', 3, '-sq', 'gb', '-o', + '-ignoreThresholds'): + return "Expand volume failed", 1023 + elif cmd == ('storagegroup', '-list', '-gname', + 'fakehost'): + return '\nStorage Group Name: fakehost' + \ + '\nStorage Group UID: 78:47:C4:F2:CA:' + \ + '\n\nHLU/ALU Pairs:\n\n HLU Number ' + \ + 'ALU Number\n ---------- ----------\n' + \ + ' 10 64\nShareable: YES\n', 0 + elif cmd == ('lun', '-list', '-l', '10', '-owner'): + return '\n\nCurrent Owner: SP A', 0 + elif cmd == ('storagegroup', '-addhlu', '-o', '-gname', + 'fakehost', '-hlu', 1, '-alu', '10'): + return None, 0 + elif cmd == ('connection', '-getport', '-sp', 'A'): + return 'SP: A\nPort ID: 5\nPort WWN: iqn.1992-04.' 
+ \ + 'com.emc:cx.fnm00124000215.a5\niSCSI Alias: 0215.a5\n', 0 + + def setUp(self): + # backup + back_os_path_exists = os.path.exists + self.addCleanup(self._restore, back_os_path_exists) + super(EMCVNXCLIDriverISCSITestCase, self).setUp() + self.configuration = conf.Configuration(None) + self.configuration.append_config_values = mock.Mock(return_value=0) + self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli' + self.configuration.san_ip = '10.0.0.1' + self.configuration.storage_vnx_pool_name = 'unit_test_pool' + self.configuration.san_login = 'sysadmin' + self.configuration.san_password = 'sysadmin' + self.configuration.default_timeout = 0 + self.testData = EMCVNXCLIDriverTestData() + self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \ + '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 ' + os.path.exists = mock.Mock(return_value=1) + EMCVnxCli._cli_execute = mock.Mock(side_effect=self._fake_cli_executor) + self.driver = EMCCLIISCSIDriver(configuration=self.configuration) + self.driver.cli.wait_interval = 0 + + def _restore(self, back_os_path_exists): + # recover + os.path.exists = back_os_path_exists + + def test_create_destroy_volume_withoutExtraSpec(self): + # case + self.driver.create_volume(self.testData.test_volume) + self.driver.delete_volume(self.testData.test_volume) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', '-poolName', + 'unit_test_pool', '-name', 'vol1'), + mock.call('lun', '-list', '-name', 'vol1'), + mock.call('lun', '-destroy', '-name', 'vol1', + '-forceDetach', '-o')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_destroy_volume_withExtraSpec(self): + # mock + extra_specs = {'storage:provisioning': 'Thin'} + volume_types.get = mock.Mock(return_value=extra_specs) + # case + self.driver.create_volume(self.testData.test_volume) + 
self.driver.delete_volume(self.testData.test_volume) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', '-poolName', + 'unit_test_pool', '-name', 'vol1'), + mock.call('lun', '-list', '-name', 'vol1'), + mock.call('lun', '-destroy', '-name', 'vol1', + '-forceDetach', '-o')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_get_volume_stats(self): + # mock + self.configuration.safe_get = mock.Mock(return_value=0) + # case + rc = self.driver.get_volume_stats(True) + stats = {'volume_backend_name': 'EMCCLIISCSIDriver', + 'free_capacity_gb': 1000.0, + 'driver_version': '02.00.00', 'total_capacity_gb': 10000.0, + 'reserved_percentage': 0, 'vendor_name': 'EMC', + 'storage_protocol': 'iSCSI'} + self.assertEqual(rc, stats) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-userCap', '-availableCap')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_destroy_volume_snapshot(self): + # case + self.driver.create_snapshot(self.testData.test_snapshot) + self.driver.delete_snapshot(self.testData.test_snapshot) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-list', '-name', 'vol-vol1'), + mock.call('snap', '-create', '-res', '16', '-name', + 'snapshot1', '-allowReadWrite', 'yes'), + mock.call('snap', '-destroy', '-id', 'snapshot1', '-o')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + @mock.patch.object( + EMCCLIISCSIDriver, + '_do_iscsi_discovery', + return_value=['10.0.0.3:3260,1 ' + 'iqn.1992-04.com.emc:cx.apm00123907237.a8', + '10.0.0.4:3260,2 ' + 'iqn.1992-04.com.emc:cx.apm00123907237.b8']) + def test_initialize_connection(self, _mock_iscsi_discovery): + # case + rc = self.driver.initialize_connection( + self.testData.test_volume, + 
self.testData.connector) + connect_info = {'driver_volume_type': 'iscsi', 'data': + {'target_lun': -1, 'volume_id': '1', + 'target_iqn': 'iqn.1992-04.com.emc:' + + 'cx.apm00123907237.b8', + 'target_discovered': True, + 'target_portal': '10.0.0.4:3260'}} + self.assertEqual(rc, connect_info) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('storagegroup', '-list', '-gname', 'fakehost'), + mock.call('lun', '-list', '-name', 'vol1'), + mock.call('lun', '-list', '-name', 'vol1'), + mock.call('storagegroup', '-list', '-gname', 'fakehost'), + mock.call('lun', '-list', '-l', '10', '-owner'), + mock.call('storagegroup', '-addhlu', '-o', '-gname', + 'fakehost', '-hlu', 1, '-alu', '10'), + mock.call('lun', '-list', '-name', 'vol1'), + mock.call('storagegroup', '-list', '-gname', 'fakehost'), + mock.call('lun', '-list', '-l', '10', '-owner'), + mock.call('connection', '-getport', '-sp', 'A')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_terminate_connection(self): + # case + self.driver.terminate_connection(self.testData.test_volume, + self.testData.connector) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('storagegroup', '-list', '-gname', 'fakehost'), + mock.call('lun', '-list', '-name', 'vol1'), + mock.call('storagegroup', '-list', '-gname', 'fakehost'), + mock.call('lun', '-list', '-l', '10', '-owner')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_volume_failed(self): + # case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, + self.testData.test_failed_volume) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', '-poolName', + 'unit_test_pool', '-name', 'failed_vol1')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_volume_snapshot_failed(self): + # 
case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, + self.testData.test_failed_snapshot) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-list', '-name', 'vol-vol1'), + mock.call('snap', '-create', '-res', '16', '-name', + 'failed_snapshot', '-allowReadWrite', 'yes')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_volume_from_snapshot(self): + # case + self.driver.create_volume_from_snapshot(self.testData.test_volfromsnap, + self.testData.test_snapshot) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', '-poolName', + 'unit_test_pool', '-name', 'volfromsnapdest'), + mock.call('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'vol-vol1', '-name', + 'volfromsnap'), + mock.call('lun', '-attach', '-name', 'volfromsnap', + '-snapName', 'snapshot1'), + mock.call('lun', '-list', '-name', 'volfromsnap'), + mock.call('lun', '-list', '-name', 'volfromsnapdest'), + mock.call('migrate', '-start', '-source', '10', '-dest', + '101', '-rate', 'ASAP', '-o'), + mock.call('lun', '-list', '-name', 'volfromsnap', + '-attachedSnapshot')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_volume_from_snapshot_sync_failed(self): + # case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + self.testData.test_volfromsnap_e, + self.testData.test_snapshot) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', '-poolName', + 'unit_test_pool', '-name', 'volfromsnap_edest'), + mock.call('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'vol-vol1', '-name', + 'volfromsnap_e'), + mock.call('lun', '-attach', '-name', 'volfromsnap_e', + '-snapName', 'snapshot1'), 
+ mock.call('lun', '-list', '-name', 'volfromsnap_e'), + mock.call('lun', '-list', '-name', 'volfromsnap_edest'), + mock.call('migrate', '-start', '-source', '20', '-dest', + '201', '-rate', 'ASAP', '-o'), + mock.call('lun', '-list', '-name', 'volfromsnap_e', + '-attachedSnapshot')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_cloned_volume(self): + # case + self.driver.create_cloned_volume(self.testData.test_clone, + self.testData.test_clone_src) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-list', '-name', 'clone1src'), + mock.call('snap', '-create', '-res', '22', '-name', + 'clone1src-temp-snapshot', '-allowReadWrite', + 'yes'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 'gb', '-poolName', + 'unit_test_pool', '-name', 'clone1dest'), + mock.call('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'clone1src', '-name', + 'clone1'), + mock.call('lun', '-attach', '-name', 'clone1', + '-snapName', 'clone1src-temp-snapshot'), + mock.call('lun', '-list', '-name', 'clone1'), + mock.call('lun', '-list', '-name', 'clone1dest'), + mock.call('migrate', '-start', '-source', '30', '-dest', + '301', '-rate', 'ASAP', '-o'), + mock.call('lun', '-list', '-name', 'clone1', + '-attachedSnapshot'), + mock.call('snap', '-destroy', '-id', + 'clone1src-temp-snapshot', '-o')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_volume_clone_sync_failed(self): + # case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + self.testData.test_clone_e, + self.testData.test_clone_src) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-list', '-name', 'clone1src'), + mock.call('snap', '-create', '-res', '22', '-name', + 'clone1src-temp-snapshot', '-allowReadWrite', + 'yes'), + mock.call('lun', '-create', '-type', 'NonThin', + '-capacity', 1, '-sq', 
'gb', '-poolName', + 'unit_test_pool', '-name', 'clone1_edest'), + mock.call('lun', '-create', '-type', 'Snap', + '-primaryLunName', 'clone1src', '-name', + 'clone1_e'), + mock.call('lun', '-attach', '-name', 'clone1_e', + '-snapName', 'clone1src-temp-snapshot'), + mock.call('lun', '-list', '-name', 'clone1_e'), + mock.call('lun', '-list', '-name', 'clone1_edest'), + mock.call('migrate', '-start', '-source', '40', '-dest', + '401', '-rate', 'ASAP', '-o'), + mock.call('lun', '-list', '-name', 'clone1_e', + '-attachedSnapshot')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_delete_volume_failed(self): + # case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, + self.testData.test_failed_volume) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-destroy', '-name', 'failed_vol1', + '-forceDetach', '-o')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_extend_volume(self): + # case + self.driver.extend_volume(self.testData.test_volume, 2) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-expand', '-name', 'vol1', '-capacity', + 2, '-sq', 'gb', '-o', '-ignoreThresholds')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_extend_volume_has_snapshot(self): + # case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, + self.testData.test_failed_volume, + 2) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-expand', '-name', 'failed_vol1', + '-capacity', 2, '-sq', 'gb', '-o', + '-ignoreThresholds')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_extend_volume_failed(self): + # case + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, + self.testData.test_failed_volume, + 3) + expected = [mock.call('storagepool', '-list', '-name', + 
'unit_test_pool', '-state'), + mock.call('lun', '-expand', '-name', 'failed_vol1', + '-capacity', 3, '-sq', 'gb', '-o', + '-ignoreThresholds')] + EMCVnxCli._cli_execute.assert_has_calls(expected) + + def test_create_remove_export(self): + # case + self.driver.create_export(None, self.testData.test_volume) + self.driver.remove_export(None, self.testData.test_volume) + expected = [mock.call('storagepool', '-list', '-name', + 'unit_test_pool', '-state'), + mock.call('lun', '-list', '-name', 'vol1')] + EMCVnxCli._cli_execute.assert_has_calls(expected) diff --git a/cinder/volume/drivers/emc/emc_cli_iscsi.py b/cinder/volume/drivers/emc/emc_cli_iscsi.py new file mode 100644 index 000000000..bb7e9fb6f --- /dev/null +++ b/cinder/volume/drivers/emc/emc_cli_iscsi.py @@ -0,0 +1,257 @@ +# Copyright (c) 2012 - 2014 EMC Corporation, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iSCSI Drivers for EMC VNX array based on CLI. 
+ +""" + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.emc import emc_vnx_cli + +LOG = logging.getLogger(__name__) + + +class EMCCLIISCSIDriver(driver.ISCSIDriver): + """EMC ISCSI Drivers for VNX using CLI.""" + + def __init__(self, *args, **kwargs): + + super(EMCCLIISCSIDriver, self).__init__(*args, **kwargs) + self.cli = emc_vnx_cli.EMCVnxCli( + 'iSCSI', + configuration=self.configuration) + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + """Creates a EMC(VMAX/VNX) volume.""" + self.cli.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self.cli.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Creates a cloned volume.""" + self.cli.create_cloned_volume(volume, src_vref) + + def delete_volume(self, volume): + """Deletes an EMC volume.""" + self.cli.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self.cli.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + self.cli.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + pass + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + self.cli.create_export(context, volume) + + def remove_export(self, context, volume): + """Driver entry point to remove an export for a volume.""" + pass + + def check_for_export(self, context, volume_id): + """Make sure volume is exported.""" + pass + + def extend_volume(self, volume, new_size): + self.cli.extend_volume(volume, new_size) + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info. 
+ + The iscsi driver returns a driver_volume_type of 'iscsi'. + the format of the driver data is defined in _get_iscsi_properties. + + :param volume: volume to be attached. + :param connector: connector information. + :returns: dictionary containing iscsi_properties. + Example return value: + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': 1, + } + } + """ + @utils.synchronized('emc-connection-' + connector['host'], + external=True) + def do_initialize_connection(): + self.cli.initialize_connection(volume, connector) + do_initialize_connection() + + iscsi_properties = self.vnx_get_iscsi_properties(volume, connector) + return { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'target_iqn': iscsi_properties['target_iqn'], + 'target_lun': iscsi_properties['target_lun'], + 'target_portal': iscsi_properties['target_portal'], + 'volume_id': iscsi_properties['volume_id'] + } + } + + def _do_iscsi_discovery(self, volume): + + LOG.warn(_("ISCSI provider_location not stored for volume %s, " + "using discovery.") % (volume['name'])) + + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', + self.configuration.iscsi_ip_address, + run_as_root=True) + targets = [] + for target in out.splitlines(): + targets.append(target) + + return targets + + # Rename this function to vnx_get_iscsi_properties because it has + # different input parameters from _get_iscsi_properties in the base class + def vnx_get_iscsi_properties(self, volume, connector): + """Gets iscsi configuration. + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. 
Discovery may be completely removed in the future.
%s") % endpoint) + foundEndpoint = True + break + if foundEndpoint: + break + + if device_sp and not foundEndpoint: + LOG.warn(_("ISCSI endpoint not found for SP %(sp)s ") + % {'sp': device_sp}) + + properties['target_lun'] = device_number + + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + LOG.debug(_("ISCSI properties: %s") % (properties)) + + return properties + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + @utils.synchronized('emc-connection-' + connector['host'], + external=True) + def do_terminate_connection(): + self.cli.terminate_connection(volume, connector) + do_terminate_connection() + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self.update_volume_status() + LOG.info(_("update_volume_status:%s"), self._stats) + + return self._stats + + def update_volume_status(self): + """Retrieve status info from volume group.""" + LOG.debug(_("Updating volume status")) + # retrieving the volume update from the VNX + data = self.cli.update_volume_status() + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or 'EMCCLIISCSIDriver' + data['storage_protocol'] = 'iSCSI' + self._stats = data diff --git a/cinder/volume/drivers/emc/emc_vnx_cli.py b/cinder/volume/drivers/emc/emc_vnx_cli.py new file mode 100644 index 000000000..4db39dabe --- /dev/null +++ b/cinder/volume/drivers/emc/emc_vnx_cli.py @@ -0,0 +1,746 @@ +# Copyright (c) 2012 - 2014 EMC Corporation, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VNX CLI on iSCSI.
"""

import os
import time

from oslo.config import cfg

from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import processutils
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import volume_types

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
VERSION = '02.00.00'

loc_opts = [
    cfg.StrOpt('naviseccli_path',
               default='',
               help='Naviseccli Path'),
    cfg.StrOpt('storage_vnx_pool_name',
               default=None,
               help='ISCSI pool name'),
    cfg.IntOpt('default_timeout',
               default=20,
               help='Default Time Out For CLI operations in minutes'),
    cfg.IntOpt('max_luns_per_storage_group',
               default=256,
               help='Default max number of LUNs in a storage group'), ]

CONF.register_opts(loc_opts)


class EMCVnxCli():
    """This class defines the functions to use the native CLI functionality.

    All array operations are performed by shelling out to the NaviSecCLI
    binary (``naviseccli_path``) through ``_cli_execute``; methods interpret
    the tool's exit codes and parse its textual output.
    """

    # Backend stats template; capacity fields are filled in by
    # update_volume_status().
    stats = {'driver_version': VERSION,
             'free_capacity_gb': 'unknown',
             'reserved_percentage': 0,
             'storage_protocol': None,
             'total_capacity_gb': 'unknown',
             'vendor_name': 'EMC',
             'volume_backend_name': None}

    def __init__(self, prtcl, configuration=None):
        """Validate configuration and verify the NaviSecCLI setup.

        :param prtcl: storage protocol name (e.g. 'iSCSI')
        :param configuration: cinder.volume.configuration.Configuration
        :raises VolumeBackendAPIException: if the pool name is missing, the
            CLI binary does not exist, or the configured pool cannot be
            queried.
        """
        self.protocol = prtcl
        self.configuration = configuration
        self.configuration.append_config_values(loc_opts)
        self.configuration.append_config_values(san.san_opts)
        self.storage_ip = self.configuration.san_ip
        self.storage_username = self.configuration.san_login
        self.storage_password = self.configuration.san_password

        self.pool_name = self.configuration.storage_vnx_pool_name
        if not self.pool_name:
            msg = (_('Pool name is not specified.'))
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        self.timeout = self.configuration.default_timeout
        self.max_luns = self.configuration.max_luns_per_storage_group
        # Pool of candidate host LUN numbers (HLUs) for storage groups.
        self.hlu_set = set(xrange(1, self.max_luns + 1))
        self.navisecclipath = self.configuration.naviseccli_path
        self.cli_prefix = (self.navisecclipath, '-address', self.storage_ip)
        self.cli_credentials = ()
        # Polling interval (seconds) for the FixedIntervalLoopingCall waits.
        self.wait_interval = 3

        # if there is a username/password provided, use those in the cmd line
        if self.storage_username is not None and \
                self.storage_password is not None:
            self.cli_credentials += ('-user', self.storage_username,
                                     '-password', self.storage_password,
                                     '-scope', '0')

        # Checking for existence of naviseccli tool
        if not os.path.exists(self.navisecclipath):
            msg = (_('Could not find NAVISECCLI tool.'))
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Testing the naviseccli setup
        query_list = ("storagepool", "-list",
                      "-name", self.pool_name, "-state")
        out, rc = self._cli_execute(*query_list)
        if rc != 0:
            LOG.error(_("Failed to find pool %s"), self.pool_name)
            raise exception.VolumeBackendAPIException(data=out)

    def _cli_execute(self, *cmd, **kwargv):
        """Run a NaviSecCLI command and return (output, exit code).

        A non-zero exit is not raised to the caller; instead stdout+stderr
        and the exit code are returned so callers can interpret the
        tool-specific return codes themselves.
        """
        if "check_exit_code" not in kwargv.keys():
            kwargv["check_exit_code"] = True
        rc = 0
        try:
            out, _err = utils.execute(*(self.cli_prefix +
                                      self.cli_credentials + cmd), **kwargv)
        except processutils.ProcessExecutionError as pe:
            rc = pe.exit_code
            out = pe.stdout + pe.stderr
        return out, rc

    def create_volume(self, volume):
        """Creates a EMC volume.

        Issues ``lun -create`` and then polls until the LUN reports the
        "Ready" state or ``default_timeout`` minutes elapse.

        :raises VolumeBackendAPIException: on CLI failure or Ready timeout.
        """
        LOG.debug(_('Entering create_volume.'))
        volumesize = volume['size']
        volumename = volume['name']

        LOG.info(_('Create Volume: %(volume)s  Size: %(size)s')
                 % {'volume': volumename,
                    'size': volumesize})

        # Thin vs. NonThin comes from the volume type's extra specs.
        thinness = self._get_provisioning_by_volume(volume)

        # executing CLI command to create volume
        LOG.debug(_('Create Volume: %(volumename)s')
                  % {'volumename': volumename})

        # NOTE(review): volumesize is an int; assumes utils.execute
        # stringifies arguments — confirm.
        lun_create = ('lun', '-create',
                      '-type', thinness,
                      '-capacity', volumesize,
                      '-sq', 'gb',
                      '-poolName', self.pool_name,
                      '-name', volumename)
        out, rc = self._cli_execute(*lun_create)
        LOG.debug(_('Create Volume: %(volumename)s  Return code: %(rc)s')
                  % {'volumename': volumename,
                     'rc': rc})
        # rc 4: LUN with this name already exists — treated as success.
        if rc == 4:
            LOG.warn(_('Volume %s already exists'), volumename)
        elif rc != 0:
            msg = (_('Failed to create %(volumename)s: %(out)s') %
                   {'volumename': volumename, 'out': out})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # wait for up to a minute to verify that the LUN has progressed
        # to Ready state
        def _wait_for_lun_ready(volumename):
            # executing cli command to check volume
            command_to_verify = ('lun', '-list', '-name', volumename)
            out, rc = self._cli_execute(*command_to_verify)
            if rc == 0 and out.find("Ready") > -1:
                raise loopingcall.LoopingCallDone()
            if int(time.time()) - self.start_lun_ready > self.timeout * 60:
                # FIX: was a (format, arg) tuple, which logged/raised the
                # tuple itself instead of the formatted message.
                msg = _('LUN %s failed to become Ready') % volumename
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        self.start_lun_ready = int(time.time())
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_lun_ready, volumename)
        timer.start(interval=self.wait_interval).wait()
self._get_provisioning_by_volume(volume) + + # executing CLI command to create volume + LOG.debug(_('Create Volume: %(volumename)s') + % {'volumename': volumename}) + + lun_create = ('lun', '-create', + '-type', thinness, + '-capacity', volumesize, + '-sq', 'gb', + '-poolName', self.pool_name, + '-name', volumename) + out, rc = self._cli_execute(*lun_create) + LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)s') + % {'volumename': volumename, + 'rc': rc}) + if rc == 4: + LOG.warn(_('Volume %s already exists'), volumename) + elif rc != 0: + msg = (_('Failed to create %(volumename)s: %(out)s') % + {'volumename': volumename, 'out': out}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # wait for up to a minute to verify that the LUN has progressed + # to Ready state + def _wait_for_lun_ready(volumename): + # executing cli command to check volume + command_to_verify = ('lun', '-list', '-name', volumename) + out, rc = self._cli_execute(*command_to_verify) + if rc == 0 and out.find("Ready") > -1: + raise loopingcall.LoopingCallDone() + if int(time.time()) - self.start_lun_ready > self.timeout * 60: + msg = (_('LUN %s failed to become Ready'), volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.start_lun_ready = int(time.time()) + timer = loopingcall.FixedIntervalLoopingCall( + _wait_for_lun_ready, volumename) + timer.start(interval=self.wait_interval).wait() + + def delete_volume(self, volume): + """Deletes an EMC volume.""" + + LOG.debug(_('Entering delete_volume.')) + volumename = volume['name'] + # defining CLI command + lun_destroy = ('lun', '-destroy', + '-name', volumename, + '-forceDetach', '-o') + + # executing CLI command to delete volume + out, rc = self._cli_execute(*lun_destroy) + LOG.debug(_('Delete Volume: %(volumename)s Output: %(out)s') + % {'volumename': volumename, 'out': out}) + if rc not in (0, 9): + msg = (_('Failed to destroy %s'), volumename) + LOG.error(msg) + raise 
exception.VolumeBackendAPIException(data=msg) + + def extend_volume(self, volume, new_size): + """Extends an EMC volume.""" + + LOG.debug(_('Entering extend_volume.')) + volumename = volume['name'] + + # defining CLI command + lun_expand = ('lun', '-expand', + '-name', volumename, + '-capacity', new_size, + '-sq', 'gb', + '-o', '-ignoreThresholds') + + # executing CLI command to extend volume + out, rc = self._cli_execute(*lun_expand) + + LOG.debug(_('Extend Volume: %(volumename)s Output: %(out)s') + % {'volumename': volumename, + 'out': out}) + if rc == 97: + msg = (_('The LUN cannot be expanded or shrunk because ' + 'it has snapshots. Command to extend the specified ' + 'volume failed.')) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if rc != 0: + msg = (_('Failed to expand %s'), volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def update_volume_status(self): + """Retrieve status info.""" + LOG.debug(_("Updating volume status")) + + poolname = self.pool_name + pool_list = ('storagepool', '-list', + '-name', poolname, + '-userCap', '-availableCap') + out, rc = self._cli_execute(*pool_list) + if rc == 0: + pool_details = out.split('\n') + self.stats['total_capacity_gb'] = float( + pool_details[3].split(':')[1].strip()) + self.stats['free_capacity_gb'] = float( + pool_details[5].split(':')[1].strip()) + else: + msg = (_('Failed to list %s'), poolname) + LOG.error(msg) + + return self.stats + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + volumename = volume['name'] + + device_id = self._find_lun_id(volumename) + + LOG.debug(_('create_export: Volume: %(volume)s Device ID: ' + '%(device_id)s') + % {'volume': volumename, + 'device_id': device_id}) + + return {'provider_location': device_id} + + def _find_lun_id(self, volumename): + """Returns the LUN of a volume.""" + + lun_list = ('lun', '-list', '-name', volumename) + + out, rc = 
self._cli_execute(*lun_list) + if rc == 0: + vol_details = out.split('\n') + lun = vol_details[0].split(' ')[3] + else: + msg = (_('Failed to list %s'), volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return lun + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + LOG.debug(_('Entering create_snapshot.')) + snapshotname = snapshot['name'] + volumename = snapshot['volume_name'] + LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s') + % {'snapshot': snapshotname, + 'volume': volumename}) + + volume_lun = self._find_lun_id(volumename) + + # defining CLI command + snap_create = ('snap', '-create', + '-res', volume_lun, + '-name', snapshotname, + '-allowReadWrite', 'yes') + # executing CLI command to create snapshot + out, rc = self._cli_execute(*snap_create) + + LOG.debug(_('Create Snapshot: %(snapshotname)s Unity: %(out)s') + % {'snapshotname': snapshotname, + 'out': out}) + if rc != 0: + msg = (_('Failed to create snap %s'), snapshotname) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + LOG.debug(_('Entering delete_snapshot.')) + + snapshotname = snapshot['name'] + volumename = snapshot['volume_name'] + LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s') + % {'snapshot': snapshotname, + 'volume': volumename}) + + def _wait_for_snap_delete(snapshot): + # defining CLI command + snapshotname = snapshot['name'] + volumename = snapshot['volume_name'] + snap_destroy = ('snap', '-destroy', '-id', snapshotname, '-o') + # executing CLI command + out, rc = self._cli_execute(*snap_destroy) + + LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: ' + '%(snapshotname)s Output: %(out)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'out': out}) + + if rc not in [0, 9, 5]: + if rc == 13: + if int(time.time()) - self.start_snap_delete < \ + self.timeout * 60: + LOG.info(_('Snapshot %s 
is in use'), snapshotname) + else: + msg = (_('Failed to destroy %s ' + ' because snapshot is in use.'), snapshotname) + LOG.error(msg) + raise exception.SnapshotIsBusy(data=msg) + else: + msg = (_('Failed to destroy %s'), snapshotname) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + raise loopingcall.LoopingCallDone() + + self.start_snap_delete = int(time.time()) + timer = loopingcall.FixedIntervalLoopingCall( + _wait_for_snap_delete, snapshot) + timer.start(interval=self.wait_interval).wait() + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + + LOG.debug(_('Entering create_volume_from_snapshot.')) + + snapshotname = snapshot['name'] + source_volume_name = snapshot['volume_name'] + volumename = volume['name'] + volumesize = snapshot['volume_size'] + + destvolumename = volumename + 'dest' + + # Create a mount point, migrate data from source (snapshot) to + # destination volume. The destination volume is the only new volume + # to be created here. 
+ LOG.info(_('Creating Destination Volume : %s ') % (destvolumename)) + + poolname = self.pool_name + thinness = self._get_provisioning_by_volume(volume) + # defining CLI command + lun_create = ('lun', '-create', '-type', thinness, + '-capacity', volumesize, '-sq', 'gb', + '-poolName', poolname, + '-name', destvolumename) + # executing CLI command + out, rc = self._cli_execute(*lun_create) + + LOG.debug(_('Create temporary Volume: %(volumename)s ' + 'Output : %(out)s') + % {'volumename': destvolumename, 'out': out}) + + if rc != 0: + msg = (_('Command to create the destination volume failed')) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # defining CLI command + smp_create = ('lun', '-create', '-type', 'Snap', + '-primaryLunName', source_volume_name, + '-name', volumename) + + # executing CLI command + out, rc = self._cli_execute(*smp_create) + LOG.debug(_('Create mount point : Volume: %(volumename)s ' + 'Source Volume: %(sourcevolumename)s Output: %(out)s') + % {'volumename': volumename, + 'sourcevolumename': source_volume_name, + 'out': out}) + + if rc != 0: + msg = (_('Failed to create SMP %s'), volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # defining CLI command + lun_attach = ('lun', '-attach', + '-name', volumename, + '-snapName', snapshotname) + + # executing CLI command + out, rc = self._cli_execute(*lun_attach) + LOG.debug(_('Attaching mount point Volume: %(volumename)s ' + 'with Snapshot: %(snapshotname)s Output: %(out)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'out': out}) + + if rc != 0: + msg = (_('Failed to attach snapshotname %s'), snapshotname) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + source_vol_lun = self._find_lun_id(volumename) + dest_vol_lun = self._find_lun_id(destvolumename) + + LOG.info(_('Migrating Mount Point Volume: %s ') % (volumename)) + + # defining CLI command + migrate_start = ('migrate', '-start', + 
'-source', source_vol_lun, + '-dest', dest_vol_lun, + '-rate', 'ASAP', '-o') + + # executing CLI command + out, rc = self._cli_execute(*migrate_start) + + LOG.debug(_('Migrate Mount Point Volume: %(volumename)s ' + 'Output : %(out)s') + % {'volumename': volumename, + 'out': out}) + + if rc != 0: + msg = (_('Failed to start migrating SMP %s'), volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.sync_status = False + + def _wait_for_sync_status(volumename): + lun_list = ('lun', '-list', '-name', volumename, + '-attachedSnapshot') + out, rc = self._cli_execute(*lun_list) + if rc == 0: + vol_details = out.split('\n') + snapshotname = vol_details[2].split(':')[1].strip() + if (snapshotname == 'N/A'): + self.sync_status = True + raise loopingcall.LoopingCallDone() + else: + LOG.info(_('Waiting for the update on Sync status of %s '), + volumename) + if int(time.time()) - self.start_status >= self.timeout * 60: + raise loopingcall.LoopingCallDone() + + self.start_status = int(time.time()) + timer = loopingcall.FixedIntervalLoopingCall( + _wait_for_sync_status, volumename) + timer.start(interval=self.wait_interval).wait() + + if not self.sync_status: + msg = (_('Failed to really migrate %s'), volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + + source_volume_name = src_vref['name'] + volumesize = src_vref['size'] + snapshotname = source_volume_name + '-temp-snapshot' + + snapshot = { + 'name': snapshotname, + 'volume_name': source_volume_name, + 'volume_size': volumesize, + } + + # Create temp Snapshot + self.create_snapshot(snapshot) + + try: + # Create volume + self.create_volume_from_snapshot(volume, snapshot) + except Exception: + msg = (_('Failed to create cloned volume %s'), volume['name']) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + finally: + # Delete temp 
Snapshot + self.delete_snapshot(snapshot) + + def get_storage_group(self, hostname): + """Returns the storage group for the host node.""" + + storage_groupname = hostname + + sg_list = ('storagegroup', '-list', '-gname', storage_groupname) + + out, rc = self._cli_execute(*sg_list) + + if rc != 0: + LOG.debug(_('creating new storage group %s'), storage_groupname) + + sg_create = ('storagegroup', '-create', + '-gname', storage_groupname) + out, rc = self._cli_execute(*sg_create) + LOG.debug(_('Create new storage group : %(storage_groupname)s, ' + 'Output: %(out)s') + % {'storage_groupname': storage_groupname, + 'out': out}) + + if rc != 0: + msg = (_('Failed to create SG %s'), storage_groupname) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # connecting the new storagegroup to the host + connect_host = ('storagegroup', '-connecthost', + '-host', hostname, + '-gname', storage_groupname, + '-o') + + out, rc = self._cli_execute(*connect_host) + LOG.debug(_('Connect storage group : %(storage_groupname)s ,' + 'To Host : %(hostname)s, Output : %(out)s') + % {'storage_groupname': storage_groupname, + 'hostname': hostname, + 'out': out}) + + if rc != 0: + msg = (_('Failed to connect %s'), hostname) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return hostname + + def find_device_details(self, volume, storage_group): + """Returns the Host Device number for the volume.""" + + allocated_lun_id = self._find_lun_id(volume["name"]) + host_lun_id = -1 + owner_sp = "" + lun_map = {} + + sg_list = ('storagegroup', '-list', '-gname', storage_group) + out, rc = self._cli_execute(*sg_list) + if out.find('HLU/ALU Pairs') == -1: + LOG.info(_('NO LUNs in the storagegroup : %s ') + % (storage_group)) + else: + sg_details = out.split('HLU/ALU Pairs:')[1] + sg_lun_details = sg_details.split('Shareable')[0] + lun_details = sg_lun_details.split('\n') + + for data in lun_details: + if data not in ['', ' HLU Number ALU Number', + ' 
---------- ----------']: + data = data.strip() + items = data.split(' ') + lun_map[int(items[len(items) - 1])] = int(items[0]) + for lun in lun_map.iterkeys(): + if lun == int(allocated_lun_id): + host_lun_id = lun_map[lun] + LOG.debug(_('Host Lun Id : %s') % (host_lun_id)) + break + + # finding the owner SP for the LUN + lun_list = ('lun', '-list', '-l', allocated_lun_id, '-owner') + out, rc = self._cli_execute(*lun_list) + if rc == 0: + output = out.split('\n') + owner_sp = output[2].split('Current Owner: SP ')[1] + LOG.debug(_('Owner SP : %s') % (owner_sp)) + + device = { + 'hostlunid': host_lun_id, + 'ownersp': owner_sp, + 'lunmap': lun_map, + } + return device + + def _get_host_lun_id(self, host_lun_id_list): + # Returns the host lun id for the LUN to be added + # in the storage group. + + used_hlu_set = set(host_lun_id_list) + for hlu in self.hlu_set - used_hlu_set: + return hlu + return None + + def _add_lun_to_storagegroup(self, volume, storage_group): + + storage_groupname = storage_group + volumename = volume['name'] + allocated_lun_id = self._find_lun_id(volumename) + count = 0 + while(count < 5): + device_info = self.find_device_details(volume, storage_group) + device_number = device_info['hostlunid'] + if device_number < 0: + lun_map = device_info['lunmap'] + if lun_map: + host_lun_id_list = lun_map.values() + + if len(host_lun_id_list) >= self.max_luns: + msg = (_('The storage group has reached the ' + 'maximum capacity of LUNs. ' + 'Command to add LUN for volume - %s ' + 'in storagegroup failed') % (volumename)) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + host_lun_id = self._get_host_lun_id(host_lun_id_list) + + if host_lun_id is None: + msg = (_('Unable to get new host lun id. Please ' + 'check if the storage group can accomodate ' + 'new LUN. 
' + 'Command to add LUN for volume - %s ' + 'in storagegroup failed') % (volumename)) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + host_lun_id = 1 + + addhlu = ('storagegroup', '-addhlu', '-o', + '-gname', storage_groupname, + '-hlu', host_lun_id, + '-alu', allocated_lun_id) + out, rc = self._cli_execute(*addhlu) + LOG.debug(_('Add ALU %(alu)s to SG %(sg)s as %(hlu)s. ' + 'Output: %(out)s') + % {'alu': allocated_lun_id, + 'sg': storage_groupname, + 'hlu': host_lun_id, + 'out': out}) + if rc == 0: + return host_lun_id + if rc == 66: + LOG.warn(_('Requested Host LUN Number already in use')) + count += 1 + else: + LOG.warn(_('LUN was already added in the storage group')) + return device_number + + if count == 5: + msg = (_('Failed to add %s into SG') % (volumename)) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _remove_lun_from_storagegroup(self, device_number, storage_group): + + storage_groupname = storage_group + removehlu = ('storagegroup', '-removehlu', + '-gname', storage_groupname, + '-hlu', device_number, + '-o') + + out, rc = self._cli_execute(*removehlu) + + LOG.debug(_('Remove %(hlu)s from SG %(sg)s. 
Output: %(out)s') + % {'hlu': device_number, + 'sg': storage_groupname, + 'out': out}) + if rc != 0: + msg = (_('Failed to remove %(hlu)s from %(sg)s') + % {'hlu': device_number, 'sg': storage_groupname}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info.""" + + hostname = connector['host'] + storage_group = self.get_storage_group(hostname) + + device_number = self._add_lun_to_storagegroup(volume, storage_group) + return device_number + + def terminate_connection(self, volume, connector): + """Disallow connection from connector.""" + hostname = connector['host'] + storage_group = self.get_storage_group(hostname) + device_info = self.find_device_details(volume, storage_group) + device_number = device_info['hostlunid'] + if device_number < 0: + LOG.error(_('Could not locate the attached volume.')) + else: + self._remove_lun_from_storagegroup(device_number, storage_group) + + def _find_iscsi_protocol_endpoints(self, device_sp): + """Returns the iSCSI initiators for a SP.""" + + initiator_address = [] + + connection_getport = ('connection', '-getport', '-sp', device_sp) + out, _rc = self._cli_execute(*connection_getport) + output = out.split('SP: ') + + for port in output: + port_info = port.split('\n') + if port_info[0] == device_sp: + port_wwn = port_info[2].split('Port WWN:')[1].strip() + initiator_address.append(port_wwn) + + LOG.debug(_('WWNs found for SP %(devicesp)s ' + 'are: %(initiator_address)s') + % {'devicesp': device_sp, + 'initiator_address': initiator_address}) + + return initiator_address + + def _get_volumetype_extraspecs(self, volume): + specs = {} + + type_id = volume['volume_type_id'] + if type_id is not None: + specs = volume_types.get_volume_type_extra_specs(type_id) + + return specs + + def _get_provisioning_by_volume(self, volume): + # By default, the user can not create thin LUN without thin + # provisioning 
enabler. + thinness = 'NonThin' + spec_id = 'storagetype:provisioning' + + specs = self._get_volumetype_extraspecs(volume) + if specs and spec_id in specs: + provisioning = specs[spec_id].lower() + if 'thin' == provisioning: + thinness = 'Thin' + elif 'thick' != provisioning: + LOG.warning(_('Invalid value of extra spec ' + '\'storagetype:provisioning\': %(provisioning)s') + % {'provisioning': specs[spec_id]}) + else: + LOG.info(_('No extra spec \'storagetype:provisioning\' exist')) + + return thinness diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample index 125de45ce..8b7efc272 100644 --- a/etc/cinder/cinder.conf.sample +++ b/etc/cinder/cinder.conf.sample @@ -1061,6 +1061,25 @@ #cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml +# +# Options defined in cinder.volume.drivers.emc.emc_vnx_cli +# + +# Naviseccli Path (string value) +#naviseccli_path= + +# ISCSI pool name (string value) +#storage_vnx_pool_name= + +# Default Time Out For CLI operations in minutes (integer +# value) +#default_timeout=20 + +# Default max number of LUNs in a storage group (integer +# value) +#max_luns_per_storage_group=256 + + # # Options defined in cinder.volume.drivers.eqlx # -- 2.45.2