from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs
from cinder.volume.drivers.ibm import ibmnas as \
cinder_volume_drivers_ibm_ibmnas
-import cinder.volume.drivers.ibm.storwize_svc
+from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \
+ cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon
+from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \
+ cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc
+from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \
+ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi
from cinder.volume.drivers.ibm import xiv_ds8k as \
cinder_volume_drivers_ibm_xivds8k
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli as \
cinder_volume_drivers_netapp_options.netapp_eseries_opts,
cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts,
cinder_volume_drivers_netapp_options.netapp_san_opts,
+ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi.
+ storwize_svc_iscsi_opts,
cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts,
cinder_backup_drivers_tsm.tsm_opts,
cinder_volume_drivers_san_hp_hpxpopts.FC_VOLUME_OPTS,
cinder_volume_drivers_san_hp_hpxpopts.COMMON_VOLUME_OPTS,
cinder_volume_drivers_san_hp_hpxpopts.HORCM_VOLUME_OPTS,
cinder_test.test_opts,
- cinder.volume.drivers.ibm.storwize_svc.storwize_svc_opts,
+ cinder_volume_drivers_ibm_gpfs.gpfs_opts,
cinder_volume_drivers_violin_v7000common.violin_opts,
cinder_exception.exc_log_opts,
cinder_common_config.global_opts,
cinder_scheduler_weights_capacity.capacity_weight_opts,
cinder_volume_drivers_sheepdog.sheepdog_opts,
- cinder_volume_drivers_ibm_gpfs.gpfs_opts,
[cinder_api_middleware_sizelimit.max_request_body_size_opt],
cinder_volume_drivers_solidfire.sf_opts,
cinder_volume_drivers_ibm_ibmnas.platform_opts,
volume_number_weight_opts,
cinder_volume_drivers_coho.coho_opts,
cinder_volume_drivers_xio.XIO_OPTS,
+ cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc.
+ storwize_svc_fc_opts,
cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS,
cinder_volume_driver.volume_opts,
cinder_volume_driver.iser_opts,
cinder_volume_drivers_tintri.tintri_opts,
cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts,
cinder_backup_manager.backup_manager_opts,
+ cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon.
+ storwize_svc_opts,
cinder_volume_drivers_hitachi_hbsdfc.volume_opts,
cinder_quota.quota_opts,
cinder_volume_drivers_huawei_huaweidriver.huawei_opts,
-# Copyright 2013 IBM Corp.
+# Copyright 2015 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
from oslo_concurrency import processutils
from oslo_utils import importutils
from oslo_utils import units
+import six
from cinder import context
from cinder import exception
from cinder.tests.unit import utils as testutils
from cinder import utils
from cinder.volume import configuration as conf
-from cinder.volume.drivers.ibm import storwize_svc
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common
+from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc
+from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi
from cinder.volume import qos_specs
from cinder.volume import volume_types
ids.sort()
for index, n in enumerate(ids):
if n > index:
- return str(index)
- return str(len(ids))
+ return six.text_type(index)
+ return six.text_type(len(ids))
# Check if name is valid
@staticmethod
num = num * 1024
unit_index += 1
- return str(num)
+ return six.text_type(num)
def _cmd_lslicense(self, **kwargs):
rows = [None] * 3
'real_capacity', 'overallocation', 'warning',
'easy_tier', 'easy_tier_status']
rows[1] = ['1', self._flags['storwize_svc_volpool_name'], 'online',
- '1', str(len(self._volumes_list)), '3573412790272',
- '256', '3529926246400', '1693247906775', '277841182',
- '38203734097', '47', '80', 'auto', 'inactive']
+ '1', six.text_type(len(self._volumes_list)),
+ '3573412790272', '256', '3529926246400', '1693247906775',
+ '277841182', '38203734097', '47', '80', 'auto',
+ 'inactive']
rows[2] = ['2', 'openstack2', 'online',
'1', '0', '3573412790272', '256',
'3529432325160', '1693247906775', '277841182',
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.Gi
- self._volumes_list[vol_name]['capacity'] = str(curr_size + addition)
+ self._volumes_list[vol_name]['capacity'] = (
+ six.text_type(curr_size + addition))
return ('', '')
def _get_fcmap_info(self, vol_name):
cap = self._convert_bytes_units(vol['capacity'])
else:
cap = vol['capacity']
- rows.append([str(vol['id']), vol['name'], vol['IO_group_id'],
+ rows.append([six.text_type(vol['id']), vol['name'],
+ vol['IO_group_id'],
vol['IO_group_name'], 'online', '0',
self._flags['storwize_svc_volpool_name'],
cap, 'striped',
item = self._convert_bytes_units(item)
rows = []
- rows.append(['id', str(vol['id'])])
+ rows.append(['id', six.text_type(vol['id'])])
rows.append(['name', vol['name']])
rows.append(['IO_group_id', vol['IO_group_id']])
rows.append(['IO_group_name', vol['IO_group_name']])
if 'name' in kwargs:
host_name = kwargs['name'].strip('\'\"')
else:
- host_name = 'host' + str(host_info['id'])
+ host_name = 'host' + six.text_type(host_info['id'])
if self._is_invalid_name(host_name):
return self._errors['CMMVC6527E']
filter_value = kwargs['filtervalue'].split('=')[1]
to_delete = []
for k, v in self._fcmappings_list.items():
- if str(v[filter_key]) == filter_value:
+ if six.text_type(v[filter_key]) == filter_value:
source = self._volumes_list[v['source']]
target = self._volumes_list[v['target']]
self._state_transition('wait', v)
if self._fcconsistgrp_list[cg_id]['name'] == kwargs['obj']:
fcconsistgrp = self._fcconsistgrp_list[cg_id]
rows = []
- rows.append(['id', str(cg_id)])
+ rows.append(['id', six.text_type(cg_id)])
rows.append(['name', fcconsistgrp['name']])
rows.append(['status', fcconsistgrp['status']])
- rows.append(['autodelete', str(fcconsistgrp['autodelete'])])
- rows.append(['start_time', str(fcconsistgrp['start_time'])])
+ rows.append(['autodelete',
+ six.text_type(fcconsistgrp['autodelete'])])
+ rows.append(['start_time',
+ six.text_type(fcconsistgrp['start_time'])])
for fcmap_id in fcconsistgrp['fcmaps'].keys():
- rows.append(['FC_mapping_id', str(fcmap_id)])
+ rows.append(['FC_mapping_id', six.text_type(fcmap_id)])
rows.append(['FC_mapping_name',
fcconsistgrp['fcmaps'][fcmap_id]])
raise exception.InvalidInput(reason=msg)
-class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
+class StorwizeSVCISCSIFakeDriver(storwize_svc_iscsi.StorwizeSVCISCSIDriver):
+ def __init__(self, *args, **kwargs):
+ super(StorwizeSVCISCSIFakeDriver, self).__init__(*args, **kwargs)
+
+ def set_fake_storage(self, fake):
+ self.fake_storage = fake
+
+ def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
+ utils.check_ssh_injection(cmd)
+ ret = self.fake_storage.execute_command(cmd, check_exit_code)
+
+ return ret
+
+
+class StorwizeSVCFcFakeDriver(storwize_svc_fc.StorwizeSVCFCDriver):
def __init__(self, *args, **kwargs):
- super(StorwizeSVCFakeDriver, self).__init__(*args, **kwargs)
+ super(StorwizeSVCFcFakeDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
return ret
-class StorwizeSVCDriverTestCase(test.TestCase):
+class StorwizeSVCISCSIDriverTestCase(test.TestCase):
@mock.patch.object(time, 'sleep')
def setUp(self, mock_sleep):
- super(StorwizeSVCDriverTestCase, self).setUp()
+ super(StorwizeSVCISCSIDriverTestCase, self).setUp()
self.USESIM = True
if self.USESIM:
- self.driver = StorwizeSVCFakeDriver(
+ self.iscsi_driver = StorwizeSVCISCSIFakeDriver(
configuration=conf.Configuration(None))
self._def_flags = {'san_ip': 'hostname',
'san_login': 'user',
'storwize_svc_volpool_name': 'openstack',
'storwize_svc_flashcopy_timeout': 20,
'storwize_svc_flashcopy_rate': 49,
- # Test ignore capitalization
- 'storwize_svc_connection_protocol': 'iScSi',
+ 'storwize_svc_multipath_enabled': False,
'storwize_svc_allow_tenant_qos': True}
- wwpns = [str(random.randint(0, 9999999999999999)).zfill(16),
- str(random.randint(0, 9999999999999999)).zfill(16)]
- initiator = 'test.initiator.%s' % str(random.randint(10000, 99999))
+ wwpns = [
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16),
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
+ initiator = 'test.initiator.%s' % six.text_type(
+ random.randint(10000, 99999))
self._connector = {'ip': '1.234.56.78',
'host': 'storwize-svc-test',
'wwpns': wwpns,
'initiator': initiator}
self.sim = StorwizeSVCManagementSimulator('openstack')
- self.driver.set_fake_storage(self.sim)
+ self.iscsi_driver.set_fake_storage(self.sim)
self.ctxt = context.get_admin_context()
- else:
- self.driver = storwize_svc.StorwizeSVCDriver(
- configuration=conf.Configuration(None))
- self._def_flags = {'san_ip': '1.111.11.11',
- 'san_login': 'user',
- 'san_password': 'password',
- 'storwize_svc_volpool_name': 'openstack',
- # Test ignore capitalization
- 'storwize_svc_connection_protocol': 'iScSi',
- 'storwize_svc_allow_tenant_qos': True,
- 'ssh_conn_timeout': 0}
- config_group = self.driver.configuration.config_group
- self.driver.configuration.set_override('rootwrap_config',
- '/etc/cinder/rootwrap.conf',
- config_group)
- self._connector = utils.brick_get_connector_properties()
self._reset_flags()
self.ctxt = context.get_admin_context()
- db_driver = self.driver.configuration.db_driver
+ db_driver = self.iscsi_driver.configuration.db_driver
self.db = importutils.import_module(db_driver)
- self.driver.db = self.db
- self.driver.do_setup(None)
- self.driver.check_for_setup_error()
- self.driver._helpers.check_fcmapping_interval = 0
+ self.iscsi_driver.db = self.db
+ self.iscsi_driver.do_setup(None)
+ self.iscsi_driver.check_for_setup_error()
+ self.iscsi_driver._helpers.check_fcmapping_interval = 0
def _set_flag(self, flag, value):
- group = self.driver.configuration.config_group
- self.driver.configuration.set_override(flag, value, group)
+ group = self.iscsi_driver.configuration.config_group
+ self.iscsi_driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
- self.driver.configuration.local_conf.reset()
+ self.iscsi_driver.configuration.local_conf.reset()
for k, v in self._def_flags.items():
self._set_flag(k, v)
- def _assert_vol_exists(self, name, exists):
- is_vol_defined = self.driver._helpers.is_vdisk_defined(name)
- self.assertEqual(exists, is_vol_defined)
-
- def test_storwize_svc_connectivity(self):
- # Make sure we detect if the pool doesn't exist
- no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999)
- self._set_flag('storwize_svc_volpool_name', no_exist_pool)
- self.assertRaises(exception.InvalidInput,
- self.driver.do_setup, None)
- self._reset_flags()
-
- # Check the case where the user didn't configure IP addresses
- # as well as receiving unexpected results from the storage
- if self.USESIM:
- self.sim.error_injection('lsnodecanister', 'header_mismatch')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.do_setup, None)
- self.sim.error_injection('lsnodecanister', 'remove_field')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.do_setup, None)
- self.sim.error_injection('lsportip', 'header_mismatch')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.do_setup, None)
- self.sim.error_injection('lsportip', 'remove_field')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.do_setup, None)
-
- # Check with bad parameters
- self._set_flag('san_ip', '')
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- self._set_flag('san_password', None)
- self._set_flag('san_private_key', None)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- self._set_flag('storwize_svc_vol_grainsize', 42)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- self._set_flag('storwize_svc_vol_compression', True)
- self._set_flag('storwize_svc_vol_rsize', -1)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- self._set_flag('storwize_svc_vol_rsize', 2)
- self._set_flag('storwize_svc_vol_nofmtdisk', True)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- self._set_flag('storwize_svc_connection_protocol', 'foo')
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- self._set_flag('storwize_svc_vol_iogrp', 5)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
-
- if self.USESIM:
- self.sim.error_injection('lslicense', 'no_compression')
- self.sim.error_injection('lsguicapabilities', 'no_compression')
- self._set_flag('storwize_svc_vol_compression', True)
- self.driver.do_setup(None)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- self._reset_flags()
+ def _create_volume(self, **kwargs):
+ vol = testutils.create_volume(self.ctxt, **kwargs)
+ self.iscsi_driver.create_volume(vol)
+ return vol
- # Finally, check with good parameters
- self.driver.do_setup(None)
+ def _delete_volume(self, volume):
+ self.iscsi_driver.delete_volume(volume)
+ self.db.volume_destroy(self.ctxt, volume['id'])
def _generate_vol_info(self, vol_name, vol_id):
- rand_id = str(random.randint(10000, 99999))
+ rand_id = six.text_type(random.randint(10000, 99999))
if vol_name:
return {'name': 'snap_volume%s' % rand_id,
'volume_name': vol_name,
else:
return {'name': 'test_volume%s' % rand_id,
'size': 10,
- 'id': '%s' % rand_id,
+ 'id': rand_id,
'volume_type_id': None,
'mdisk_grp_name': 'openstack'}
- def _create_volume(self, **kwargs):
- vol = testutils.create_volume(self.ctxt, **kwargs)
- self.driver.create_volume(vol)
- return vol
-
- def _delete_volume(self, volume):
- self.driver.delete_volume(volume)
- self.db.volume_destroy(self.ctxt, volume['id'])
-
- def _create_consistencygroup_in_db(self, **kwargs):
- cg = testutils.create_consistencygroup(self.ctxt, **kwargs)
- return cg
-
- def _create_cgsnapshot_in_db(self, cg_id, **kwargs):
- cg_snapshot = testutils.create_cgsnapshot(self.ctxt,
- consistencygroup_id= cg_id,
- **kwargs)
+ def _assert_vol_exists(self, name, exists):
+ is_vol_defined = self.iscsi_driver._helpers.is_vdisk_defined(name)
+ self.assertEqual(exists, is_vol_defined)
- cg_id = cg_snapshot['consistencygroup_id']
- volumes = self.db.volume_get_all_by_group(self.ctxt.elevated(), cg_id)
+ def test_storwize_svc_iscsi_validate_connector(self):
+ conn_neither = {'host': 'host'}
+ conn_iscsi = {'host': 'host', 'initiator': 'foo'}
+ conn_fc = {'host': 'host', 'wwpns': 'bar'}
+ conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
- if not volumes:
- msg = _("Consistency group is empty. No cgsnapshot "
- "will be created.")
- raise exception.InvalidConsistencyGroup(reason=msg)
+ self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI'])
+ self.iscsi_driver.validate_connector(conn_iscsi)
+ self.iscsi_driver.validate_connector(conn_both)
+ self.assertRaises(exception.InvalidConnectorException,
+ self.iscsi_driver.validate_connector, conn_fc)
+ self.assertRaises(exception.InvalidConnectorException,
+ self.iscsi_driver.validate_connector, conn_neither)
- for volume in volumes:
- testutils.create_snapshot(self.ctxt,
- volume['id'],
- cg_snapshot.id,
- cg_snapshot.name,
- cg_snapshot.id,
- "creating")
+ self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
+ self.iscsi_driver.validate_connector(conn_iscsi)
+ self.iscsi_driver.validate_connector(conn_both)
+ self.assertRaises(exception.InvalidConnectorException,
+ self.iscsi_driver.validate_connector, conn_neither)
- return cg_snapshot
+ def test_storwize_terminate_iscsi_connection(self):
+ # create a iSCSI volume
+ volume_iSCSI = self._create_volume()
+ extra_spec = {'capabilities:storage_protocol': '<in> iSCSI'}
+ vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec)
+ volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id']
- def _create_test_vol(self, opts):
- ctxt = testutils.get_test_admin_context()
- type_ref = volume_types.create(ctxt, 'testtype', opts)
- volume = self._generate_vol_info(None, None)
- type_id = type_ref['id']
- type_ref = volume_types.get_volume_type(ctxt, type_id)
- volume['volume_type_id'] = type_id
- volume['volume_type'] = type_ref
- self.driver.create_volume(volume)
+ connector = {'host': 'storwize-svc-host',
+ 'wwnns': ['20000090fa17311e', '20000090fa17311f'],
+ 'wwpns': ['ff00000000000000', 'ff00000000000001'],
+ 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}
- attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
- self.driver.delete_volume(volume)
- volume_types.destroy(ctxt, type_ref['id'])
- return attrs
+ self.iscsi_driver.initialize_connection(volume_iSCSI, connector)
+ self.iscsi_driver.terminate_connection(volume_iSCSI, connector)
- def _get_default_opts(self):
- opt = {'rsize': 2,
- 'warning': 0,
- 'autoexpand': True,
- 'grainsize': 256,
- 'compression': False,
- 'easytier': True,
- 'protocol': 'iSCSI',
- 'iogrp': 0,
- 'qos': None,
- 'replication': False,
- 'stretched_cluster': None,
- 'nofmtdisk': False}
- return opt
+ def test_storwize_svc_iscsi_host_maps(self):
+ # Create two volumes to be used in mappings
- @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos')
- @mock.patch.object(storwize_svc.StorwizeSVCDriver, '_get_vdisk_params')
- def test_storwize_svc_create_volume_with_qos(self, get_vdisk_params,
- add_vdisk_qos):
- vol = testutils.create_volume(self.ctxt)
- fake_opts = self._get_default_opts()
- # If the qos is empty, chvdisk should not be called
- # for create_volume.
- get_vdisk_params.return_value = fake_opts
- self.driver.create_volume(vol)
- self._assert_vol_exists(vol['name'], True)
- self.assertFalse(add_vdisk_qos.called)
- self.driver.delete_volume(vol)
+ ctxt = context.get_admin_context()
+ volume1 = self._generate_vol_info(None, None)
+ self.iscsi_driver.create_volume(volume1)
+ volume2 = self._generate_vol_info(None, None)
+ self.iscsi_driver.create_volume(volume2)
- # If the qos is not empty, chvdisk should be called
- # for create_volume.
- fake_opts['qos'] = {'IOThrottling': 5000}
- get_vdisk_params.return_value = fake_opts
- self.driver.create_volume(vol)
- self._assert_vol_exists(vol['name'], True)
- add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos'])
+ # Create volume types that we created
+ types = {}
+ for protocol in ['iSCSI']:
+ opts = {'storage_protocol': '<in> ' + protocol}
+ types[protocol] = volume_types.create(ctxt, protocol, opts)
- self.driver.delete_volume(vol)
- self._assert_vol_exists(vol['name'], False)
+ expected = {'iSCSI': {'driver_volume_type': 'iscsi',
+ 'data': {'target_discovered': False,
+ 'target_iqn':
+ 'iqn.1982-01.com.ibm:1234.sim.node1',
+ 'target_portal': '1.234.56.78:3260',
+ 'target_lun': 0,
+ 'auth_method': 'CHAP',
+ 'discovery_auth_method': 'CHAP'}}}
- def test_storwize_svc_snapshots(self):
- vol1 = self._create_volume()
- snap1 = self._generate_vol_info(vol1['name'], vol1['id'])
+ volume1['volume_type_id'] = types[protocol]['id']
+ volume2['volume_type_id'] = types[protocol]['id']
- # Test timeout and volume cleanup
- self._set_flag('storwize_svc_flashcopy_timeout', 1)
- self.assertRaises(exception.VolumeDriverException,
- self.driver.create_snapshot, snap1)
- self._assert_vol_exists(snap1['name'], False)
- self._reset_flags()
+ # Check case where no hosts exist
+ if self.USESIM:
+ ret = self.iscsi_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNone(ret)
- # Test prestartfcmap failing
- with mock.patch.object(
- storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart:
- prestart.side_effect = exception.VolumeBackendAPIException
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_snapshot, snap1)
+ # Make sure that the volumes have been created
+ self._assert_vol_exists(volume1['name'], True)
+ self._assert_vol_exists(volume2['name'], True)
+
+ # Initialize connection from the first volume to a host
+ ret = self.iscsi_driver.initialize_connection(
+ volume1, self._connector)
+ self.assertEqual(expected[protocol]['driver_volume_type'],
+ ret['driver_volume_type'])
+ for k, v in expected[protocol]['data'].items():
+ self.assertEqual(v, ret['data'][k])
+
+ # Initialize again, should notice it and do nothing
+ ret = self.iscsi_driver.initialize_connection(
+ volume1, self._connector)
+ self.assertEqual(expected[protocol]['driver_volume_type'],
+ ret['driver_volume_type'])
+ for k, v in expected[protocol]['data'].items():
+ self.assertEqual(v, ret['data'][k])
+
+ # Try to delete the 1st volume (should fail because it is mapped)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.iscsi_driver.delete_volume,
+ volume1)
+ ret = self.iscsi_driver.terminate_connection(volume1,
+ self._connector)
if self.USESIM:
- self.sim.error_injection('lsfcmap', 'speed_up')
- self.sim.error_injection('startfcmap', 'bad_id')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_snapshot, snap1)
- self._assert_vol_exists(snap1['name'], False)
- self.sim.error_injection('prestartfcmap', 'bad_id')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_snapshot, snap1)
- self._assert_vol_exists(snap1['name'], False)
-
- # Test successful snapshot
- self.driver.create_snapshot(snap1)
- self._assert_vol_exists(snap1['name'], True)
+ ret = self.iscsi_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNone(ret)
- # Try to create a snapshot from an non-existing volume - should fail
+ # Check cases with no auth set for host
+ if self.USESIM:
+ for auth_enabled in [True, False]:
+ for host_exists in ['yes-auth', 'yes-noauth', 'no']:
+ self._set_flag('storwize_svc_iscsi_chap_enabled',
+ auth_enabled)
+ case = 'en' + six.text_type(
+ auth_enabled) + 'ex' + six.text_type(host_exists)
+ conn_na = {'initiator': 'test:init:%s' %
+ random.randint(10000, 99999),
+ 'ip': '11.11.11.11',
+ 'host': 'host-%s' % case}
+ if host_exists.startswith('yes'):
+ self.sim._add_host_to_list(conn_na)
+ if host_exists == 'yes-auth':
+ kwargs = {'chapsecret': 'foo',
+ 'obj': conn_na['host']}
+ self.sim._cmd_chhost(**kwargs)
+ volume1['volume_type_id'] = types['iSCSI']['id']
+
+ init_ret = self.iscsi_driver.initialize_connection(volume1,
+ conn_na)
+ host_name = self.sim._host_in_list(conn_na['host'])
+ chap_ret = (
+ self.iscsi_driver._helpers.get_chap_secret_for_host(
+ host_name))
+ if auth_enabled or host_exists == 'yes-auth':
+ self.assertIn('auth_password', init_ret['data'])
+ self.assertIsNotNone(chap_ret)
+ else:
+ self.assertNotIn('auth_password', init_ret['data'])
+ self.assertIsNone(chap_ret)
+ self.iscsi_driver.terminate_connection(volume1, conn_na)
+ self._set_flag('storwize_svc_iscsi_chap_enabled', True)
+
+ # Test no preferred node
+ if self.USESIM:
+ self.sim.error_injection('lsvdisk', 'no_pref_node')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.iscsi_driver.initialize_connection,
+ volume1, self._connector)
+
+ # Initialize connection from the second volume to the host with no
+ # preferred node set if in simulation mode, otherwise, just
+ # another initialize connection.
+ if self.USESIM:
+ self.sim.error_injection('lsvdisk', 'blank_pref_node')
+ self.iscsi_driver.initialize_connection(volume2, self._connector)
+
+ # Try to remove connection from host that doesn't exist (should fail)
+ conn_no_exist = self._connector.copy()
+ conn_no_exist['initiator'] = 'i_dont_exist'
+ conn_no_exist['wwpns'] = ['0000000000000000']
+ self.assertRaises(exception.VolumeDriverException,
+ self.iscsi_driver.terminate_connection,
+ volume1,
+ conn_no_exist)
+
+ # Try to remove connection from volume that isn't mapped (should print
+ # message but NOT fail)
+ unmapped_vol = self._generate_vol_info(None, None)
+ self.iscsi_driver.create_volume(unmapped_vol)
+ self.iscsi_driver.terminate_connection(unmapped_vol, self._connector)
+ self.iscsi_driver.delete_volume(unmapped_vol)
+
+ # Remove the mapping from the 1st volume and delete it
+ self.iscsi_driver.terminate_connection(volume1, self._connector)
+ self.iscsi_driver.delete_volume(volume1)
+ self._assert_vol_exists(volume1['name'], False)
+
+ # Make sure our host still exists
+ host_name = self.iscsi_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNotNone(host_name)
+
+ # Remove the mapping from the 2nd volume. The host should
+ # be automatically removed because there are no more mappings.
+ self.iscsi_driver.terminate_connection(volume2, self._connector)
+
+ # Check if we successfully terminate connections when the host is not
+ # specified (see bug #1244257)
+ fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
+ self.iscsi_driver.initialize_connection(volume2, self._connector)
+ host_name = self.iscsi_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNotNone(host_name)
+ self.iscsi_driver.terminate_connection(volume2, fake_conn)
+ host_name = self.iscsi_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNone(host_name)
+ self.iscsi_driver.delete_volume(volume2)
+ self._assert_vol_exists(volume2['name'], False)
+
+ # Delete volume types that we created
+ for protocol in ['iSCSI']:
+ volume_types.destroy(ctxt, types[protocol]['id'])
+
+ # Check if our host still exists (it should not)
+ if self.USESIM:
+ ret = (
+ self.iscsi_driver._helpers.get_host_from_connector(
+ self._connector))
+ self.assertIsNone(ret)
+
+ def test_storwize_svc_iscsi_multi_host_maps(self):
+ # We can't test connecting to multiple hosts from a single host when
+ # using real storage
+ if not self.USESIM:
+ return
+
+ # Create a volume to be used in mappings
+ ctxt = context.get_admin_context()
+ volume = self._generate_vol_info(None, None)
+ self.iscsi_driver.create_volume(volume)
+
+ # Create volume types for protocols
+ types = {}
+ for protocol in ['iSCSI']:
+ opts = {'storage_protocol': '<in> ' + protocol}
+ types[protocol] = volume_types.create(ctxt, protocol, opts)
+
+ # Create a connector for the second 'host'
+ wwpns = [six.text_type(random.randint(0, 9999999999999999)).zfill(16),
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
+ initiator = 'test.initiator.%s' % six.text_type(random.randint(10000,
+ 99999))
+ conn2 = {'ip': '1.234.56.79',
+ 'host': 'storwize-svc-test2',
+ 'wwpns': wwpns,
+ 'initiator': initiator}
+
+ # Check protocols for iSCSI
+ volume['volume_type_id'] = types[protocol]['id']
+
+ # Make sure that the volume has been created
+ self._assert_vol_exists(volume['name'], True)
+
+ self.iscsi_driver.initialize_connection(volume, self._connector)
+
+ self._set_flag('storwize_svc_multihostmap_enabled', False)
+ self.assertRaises(
+ exception.CinderException,
+ self.iscsi_driver.initialize_connection, volume, conn2)
+
+ self._set_flag('storwize_svc_multihostmap_enabled', True)
+ self.iscsi_driver.initialize_connection(volume, conn2)
+
+ self.iscsi_driver.terminate_connection(volume, conn2)
+ self.iscsi_driver.terminate_connection(volume, self._connector)
+
+
+class StorwizeSVCFcDriverTestCase(test.TestCase):
+ @mock.patch.object(time, 'sleep')
+ def setUp(self, mock_sleep):
+ super(StorwizeSVCFcDriverTestCase, self).setUp()
+ self.USESIM = True
+ if self.USESIM:
+ self.fc_driver = StorwizeSVCFcFakeDriver(
+ configuration=conf.Configuration(None))
+ self._def_flags = {'san_ip': 'hostname',
+ 'san_login': 'user',
+ 'san_password': 'pass',
+ 'storwize_svc_volpool_name': 'openstack',
+ 'storwize_svc_flashcopy_timeout': 20,
+ 'storwize_svc_flashcopy_rate': 49,
+ 'storwize_svc_multipath_enabled': False,
+ 'storwize_svc_allow_tenant_qos': True}
+ wwpns = [
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16),
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
+ initiator = 'test.initiator.%s' % six.text_type(
+ random.randint(10000, 99999))
+ self._connector = {'ip': '1.234.56.78',
+ 'host': 'storwize-svc-test',
+ 'wwpns': wwpns,
+ 'initiator': initiator}
+ self.sim = StorwizeSVCManagementSimulator('openstack')
+
+ self.fc_driver.set_fake_storage(self.sim)
+ self.ctxt = context.get_admin_context()
+
+ self._reset_flags()
+ self.ctxt = context.get_admin_context()
+ db_driver = self.fc_driver.configuration.db_driver
+ self.db = importutils.import_module(db_driver)
+ self.fc_driver.db = self.db
+ self.fc_driver.do_setup(None)
+ self.fc_driver.check_for_setup_error()
+ self.fc_driver._helpers.check_fcmapping_interval = 0
+
+ def _set_flag(self, flag, value):
+ group = self.fc_driver.configuration.config_group
+ self.fc_driver.configuration.set_override(flag, value, group)
+
+ def _reset_flags(self):
+ self.fc_driver.configuration.local_conf.reset()
+ for k, v in self._def_flags.items():
+ self._set_flag(k, v)
+
+ def _create_volume(self, **kwargs):
+ vol = testutils.create_volume(self.ctxt, **kwargs)
+ self.fc_driver.create_volume(vol)
+ return vol
+
+ def _delete_volume(self, volume):
+ self.fc_driver.delete_volume(volume)
+ self.db.volume_destroy(self.ctxt, volume['id'])
+
+ def _generate_vol_info(self, vol_name, vol_id):
+ rand_id = six.text_type(random.randint(10000, 99999))
+ if vol_name:
+ return {'name': 'snap_volume%s' % rand_id,
+ 'volume_name': vol_name,
+ 'id': rand_id,
+ 'volume_id': vol_id,
+ 'volume_size': 10,
+ 'mdisk_grp_name': 'openstack'}
+ else:
+ return {'name': 'test_volume%s' % rand_id,
+ 'size': 10,
+ 'id': '%s' % rand_id,
+ 'volume_type_id': None,
+ 'mdisk_grp_name': 'openstack'}
+
+ def _assert_vol_exists(self, name, exists):
+ is_vol_defined = self.fc_driver._helpers.is_vdisk_defined(name)
+ self.assertEqual(exists, is_vol_defined)
+
+ def test_storwize_get_host_with_fc_connection(self):
+ # Create a FC host
+ del self._connector['initiator']
+ helper = self.fc_driver._helpers
+ host_name = helper.create_host(self._connector)
+
+ # Remove the first wwpn from connector, and then try get host
+ wwpns = self._connector['wwpns']
+ wwpns.remove(wwpns[0])
+ host_name = helper.get_host_from_connector(self._connector)
+
+ self.assertIsNotNone(host_name)
+
+ def test_storwize_initiator_multiple_wwpns_connected(self):
+
+ # Generate us a test volume
+ volume = self._create_volume()
+
+ # Fibre Channel volume type
+ extra_spec = {'capabilities:storage_protocol': '<in> FC'}
+ vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
+
+ volume['volume_type_id'] = vol_type['id']
+
+ # Make sure that the volumes have been created
+ self._assert_vol_exists(volume['name'], True)
+
+ # Set up one WWPN that won't match and one that will.
+ self.fc_driver._state['storage_nodes']['1']['WWPN'] = [
+ '123456789ABCDEF0', 'AABBCCDDEEFF0010']
+
+ wwpns = ['ff00000000000000', 'ff00000000000001']
+ connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
+
+ with mock.patch.object(storwize_svc_common.StorwizeHelpers,
+ 'get_conn_fc_wwpns') as get_mappings:
+ mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002',
+ 'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012']
+ get_mappings.return_value = mapped_wwpns
+
+ # Initialize the connection
+ init_ret = self.fc_driver.initialize_connection(volume, connector)
+
+ # Make sure we return all wwpns which where mapped as part of the
+ # connection
+ self.assertEqual(mapped_wwpns,
+ init_ret['data']['target_wwn'])
+
+ def test_storwize_svc_fc_validate_connector(self):
+ conn_neither = {'host': 'host'}
+ conn_iscsi = {'host': 'host', 'initiator': 'foo'}
+ conn_fc = {'host': 'host', 'wwpns': 'bar'}
+ conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
+
+ self.fc_driver._state['enabled_protocols'] = set(['FC'])
+ self.fc_driver.validate_connector(conn_fc)
+ self.fc_driver.validate_connector(conn_both)
+ self.assertRaises(exception.InvalidConnectorException,
+ self.fc_driver.validate_connector, conn_iscsi)
+ self.assertRaises(exception.InvalidConnectorException,
+ self.fc_driver.validate_connector, conn_neither)
+
+ self.fc_driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
+ self.fc_driver.validate_connector(conn_fc)
+ self.fc_driver.validate_connector(conn_both)
+ self.assertRaises(exception.InvalidConnectorException,
+ self.fc_driver.validate_connector, conn_neither)
+
+ def test_storwize_terminate_fc_connection(self):
+ # create a FC volume
+ volume_fc = self._create_volume()
+ extra_spec = {'capabilities:storage_protocol': '<in> FC'}
+ vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec)
+ volume_fc['volume_type_id'] = vol_type_fc['id']
+
+ connector = {'host': 'storwize-svc-host',
+ 'wwnns': ['20000090fa17311e', '20000090fa17311f'],
+ 'wwpns': ['ff00000000000000', 'ff00000000000001'],
+ 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}
+
+ self.fc_driver.initialize_connection(volume_fc, connector)
+ self.fc_driver.terminate_connection(volume_fc, connector)
+
+ def test_storwize_initiator_target_map(self):
+ # Generate us a test volume
+ volume = self._create_volume()
+
+ # Fibre Channel volume type
+ extra_spec = {'capabilities:storage_protocol': '<in> FC'}
+ vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
+
+ volume['volume_type_id'] = vol_type['id']
+
+ # Make sure that the volume has been created
+ self._assert_vol_exists(volume['name'], True)
+
+ wwpns = ['ff00000000000000', 'ff00000000000001']
+ connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
+
+ # Initialize the connection
+ init_ret = self.fc_driver.initialize_connection(volume, connector)
+
+ # Check that the initiator_target_map is as expected
+ init_data = {'driver_volume_type': 'fibre_channel',
+ 'data': {'initiator_target_map':
+ {'ff00000000000000': ['AABBCCDDEEFF0011'],
+ 'ff00000000000001': ['AABBCCDDEEFF0011']},
+ 'target_discovered': False,
+ 'target_lun': 0,
+ 'target_wwn': ['AABBCCDDEEFF0011'],
+ 'volume_id': volume['id']
+ }
+ }
+
+ self.assertEqual(init_data, init_ret)
+
+ # Terminate connection
+ term_ret = self.fc_driver.terminate_connection(volume, connector)
+
+ # Check that the initiator_target_map is as expected
+ term_data = {'driver_volume_type': 'fibre_channel',
+ 'data': {'initiator_target_map':
+ {'ff00000000000000': ['AABBCCDDEEFF0011'],
+ 'ff00000000000001': ['AABBCCDDEEFF0011']}
+ }
+ }
+
+ self.assertEqual(term_data, term_ret)
+
+ def test_storwize_svc_fc_host_maps(self):
+ # Create two volumes to be used in mappings
+
+ ctxt = context.get_admin_context()
+ volume1 = self._generate_vol_info(None, None)
+ self.fc_driver.create_volume(volume1)
+ volume2 = self._generate_vol_info(None, None)
+ self.fc_driver.create_volume(volume2)
+
+ # Create volume types that we created
+ types = {}
+ for protocol in ['FC']:
+ opts = {'storage_protocol': '<in> ' + protocol}
+ types[protocol] = volume_types.create(ctxt, protocol, opts)
+
+ expected = {'FC': {'driver_volume_type': 'fibre_channel',
+ 'data': {'target_lun': 0,
+ 'target_wwn': ['AABBCCDDEEFF0011'],
+ 'target_discovered': False}}}
+
+ volume1['volume_type_id'] = types[protocol]['id']
+ volume2['volume_type_id'] = types[protocol]['id']
+
+ # Check case where no hosts exist
+ if self.USESIM:
+ ret = self.fc_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNone(ret)
+
+ # Make sure that the volumes have been created
+ self._assert_vol_exists(volume1['name'], True)
+ self._assert_vol_exists(volume2['name'], True)
+
+ # Initialize connection from the first volume to a host
+ ret = self.fc_driver.initialize_connection(
+ volume1, self._connector)
+ self.assertEqual(expected[protocol]['driver_volume_type'],
+ ret['driver_volume_type'])
+ for k, v in expected[protocol]['data'].items():
+ self.assertEqual(v, ret['data'][k])
+
+ # Initialize again, should notice it and do nothing
+ ret = self.fc_driver.initialize_connection(
+ volume1, self._connector)
+ self.assertEqual(expected[protocol]['driver_volume_type'],
+ ret['driver_volume_type'])
+ for k, v in expected[protocol]['data'].items():
+ self.assertEqual(v, ret['data'][k])
+
+ # Try to delete the 1st volume (should fail because it is mapped)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.fc_driver.delete_volume,
+ volume1)
+
+ # Check bad output from lsfabric for the 2nd volume
+ if protocol == 'FC' and self.USESIM:
+ for error in ['remove_field', 'header_mismatch']:
+ self.sim.error_injection('lsfabric', error)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.fc_driver.initialize_connection,
+ volume2, self._connector)
+
+ with mock.patch.object(storwize_svc_common.StorwizeHelpers,
+ 'get_conn_fc_wwpns') as conn_fc_wwpns:
+ conn_fc_wwpns.return_value = []
+ ret = self.fc_driver.initialize_connection(volume2,
+ self._connector)
+
+ ret = self.fc_driver.terminate_connection(volume1, self._connector)
+ if protocol == 'FC' and self.USESIM:
+ # For the first volume detach, ret['data'] should be empty
+ # only ret['driver_volume_type'] returned
+ self.assertEqual({}, ret['data'])
+ self.assertEqual('fibre_channel', ret['driver_volume_type'])
+ ret = self.fc_driver.terminate_connection(volume2,
+ self._connector)
+ self.assertEqual('fibre_channel', ret['driver_volume_type'])
+ # wwpn is randomly created
+ self.assertNotEqual({}, ret['data'])
+ if self.USESIM:
+ ret = self.fc_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNone(ret)
+
+ # Test no preferred node
+ if self.USESIM:
+ self.sim.error_injection('lsvdisk', 'no_pref_node')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.fc_driver.initialize_connection,
+ volume1, self._connector)
+
+ # Initialize connection from the second volume to the host with no
+ # preferred node set if in simulation mode, otherwise, just
+ # another initialize connection.
+ if self.USESIM:
+ self.sim.error_injection('lsvdisk', 'blank_pref_node')
+ self.fc_driver.initialize_connection(volume2, self._connector)
+
+ # Try to remove connection from host that doesn't exist (should fail)
+ conn_no_exist = self._connector.copy()
+ conn_no_exist['initiator'] = 'i_dont_exist'
+ conn_no_exist['wwpns'] = ['0000000000000000']
+ self.assertRaises(exception.VolumeDriverException,
+ self.fc_driver.terminate_connection,
+ volume1,
+ conn_no_exist)
+
+ # Try to remove connection from volume that isn't mapped (should print
+ # message but NOT fail)
+ unmapped_vol = self._generate_vol_info(None, None)
+ self.fc_driver.create_volume(unmapped_vol)
+ self.fc_driver.terminate_connection(unmapped_vol, self._connector)
+ self.fc_driver.delete_volume(unmapped_vol)
+
+ # Remove the mapping from the 1st volume and delete it
+ self.fc_driver.terminate_connection(volume1, self._connector)
+ self.fc_driver.delete_volume(volume1)
+ self._assert_vol_exists(volume1['name'], False)
+
+ # Make sure our host still exists
+ host_name = self.fc_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNotNone(host_name)
+
+ # Remove the mapping from the 2nd volume. The host should
+ # be automatically removed because there are no more mappings.
+ self.fc_driver.terminate_connection(volume2, self._connector)
+
+ # Check if we successfully terminate connections when the host is not
+ # specified (see bug #1244257)
+ fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
+ self.fc_driver.initialize_connection(volume2, self._connector)
+ host_name = self.fc_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNotNone(host_name)
+ self.fc_driver.terminate_connection(volume2, fake_conn)
+ host_name = self.fc_driver._helpers.get_host_from_connector(
+ self._connector)
+ self.assertIsNone(host_name)
+ self.fc_driver.delete_volume(volume2)
+ self._assert_vol_exists(volume2['name'], False)
+
+ # Delete volume types that we created
+ for protocol in ['FC']:
+ volume_types.destroy(ctxt, types[protocol]['id'])
+
+ # Check if our host still exists (it should not)
+ if self.USESIM:
+ ret = (self.fc_driver._helpers.get_host_from_connector(
+ self._connector))
+ self.assertIsNone(ret)
+
+ def test_storwize_svc_fc_multi_host_maps(self):
+ # We can't test connecting to multiple hosts from a single host when
+ # using real storage
+ if not self.USESIM:
+ return
+
+ # Create a volume to be used in mappings
+ ctxt = context.get_admin_context()
+ volume = self._generate_vol_info(None, None)
+ self.fc_driver.create_volume(volume)
+
+ # Create volume types for protocols
+ types = {}
+ for protocol in ['FC']:
+ opts = {'storage_protocol': '<in> ' + protocol}
+ types[protocol] = volume_types.create(ctxt, protocol, opts)
+
+ # Create a connector for the second 'host'
+ wwpns = [six.text_type(random.randint(0, 9999999999999999)).zfill(16),
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
+ initiator = 'test.initiator.%s' % six.text_type(random.randint(10000,
+ 99999))
+ conn2 = {'ip': '1.234.56.79',
+ 'host': 'storwize-svc-test2',
+ 'wwpns': wwpns,
+ 'initiator': initiator}
+
+ # Check protocols for FC
+
+ volume['volume_type_id'] = types[protocol]['id']
+
+ # Make sure that the volume has been created
+ self._assert_vol_exists(volume['name'], True)
+
+ self.fc_driver.initialize_connection(volume, self._connector)
+
+ self._set_flag('storwize_svc_multihostmap_enabled', False)
+ self.assertRaises(
+ exception.CinderException,
+ self.fc_driver.initialize_connection, volume, conn2)
+
+ self._set_flag('storwize_svc_multihostmap_enabled', True)
+ self.fc_driver.initialize_connection(volume, conn2)
+
+ self.fc_driver.terminate_connection(volume, conn2)
+ self.fc_driver.terminate_connection(volume, self._connector)
+
+
+class StorwizeSVCCommonDriverTestCase(test.TestCase):
+ @mock.patch.object(time, 'sleep')
+ def setUp(self, mock_sleep):
+ super(StorwizeSVCCommonDriverTestCase, self).setUp()
+ self.USESIM = True
+ if self.USESIM:
+ self.driver = StorwizeSVCISCSIFakeDriver(
+ configuration=conf.Configuration(None))
+
+ self._def_flags = {'san_ip': 'hostname',
+ 'san_login': 'user',
+ 'san_password': 'pass',
+ 'storwize_svc_volpool_name': 'openstack',
+ 'storwize_svc_flashcopy_timeout': 20,
+ 'storwize_svc_flashcopy_rate': 49,
+ 'storwize_svc_allow_tenant_qos': True}
+ wwpns = [
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16),
+ six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
+ initiator = 'test.initiator.%s' % six.text_type(
+ random.randint(10000, 99999))
+ self._connector = {'ip': '1.234.56.78',
+ 'host': 'storwize-svc-test',
+ 'wwpns': wwpns,
+ 'initiator': initiator}
+ self.sim = StorwizeSVCManagementSimulator('openstack')
+
+ self.driver.set_fake_storage(self.sim)
+ self.ctxt = context.get_admin_context()
+
+ self._reset_flags()
+ self.ctxt = context.get_admin_context()
+ db_driver = self.driver.configuration.db_driver
+ self.db = importutils.import_module(db_driver)
+ self.driver.db = self.db
+ self.driver.do_setup(None)
+ self.driver.check_for_setup_error()
+ self.driver._helpers.check_fcmapping_interval = 0
+
+ def _set_flag(self, flag, value):
+ group = self.driver.configuration.config_group
+ self.driver.configuration.set_override(flag, value, group)
+
+ def _reset_flags(self):
+ self.driver.configuration.local_conf.reset()
+ for k, v in self._def_flags.items():
+ self._set_flag(k, v)
+
+ def _assert_vol_exists(self, name, exists):
+ is_vol_defined = self.driver._helpers.is_vdisk_defined(name)
+ self.assertEqual(exists, is_vol_defined)
+
+ def test_storwize_svc_connectivity(self):
+ # Make sure we detect if the pool doesn't exist
+ no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999)
+ self._set_flag('storwize_svc_volpool_name', no_exist_pool)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.do_setup, None)
+ self._reset_flags()
+
+ # Check the case where the user didn't configure IP addresses
+ # as well as receiving unexpected results from the storage
+ if self.USESIM:
+ self.sim.error_injection('lsnodecanister', 'header_mismatch')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+ self.sim.error_injection('lsnodecanister', 'remove_field')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+ self.sim.error_injection('lsportip', 'header_mismatch')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+ self.sim.error_injection('lsportip', 'remove_field')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+
+ # Check with bad parameters
+ self._set_flag('san_ip', '')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('san_password', None)
+ self._set_flag('san_private_key', None)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('storwize_svc_vol_grainsize', 42)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('storwize_svc_vol_compression', True)
+ self._set_flag('storwize_svc_vol_rsize', -1)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('storwize_svc_vol_rsize', 2)
+ self._set_flag('storwize_svc_vol_nofmtdisk', True)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('storwize_svc_vol_iogrp', 5)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ if self.USESIM:
+ self.sim.error_injection('lslicense', 'no_compression')
+ self.sim.error_injection('lsguicapabilities', 'no_compression')
+ self._set_flag('storwize_svc_vol_compression', True)
+ self.driver.do_setup(None)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ # Finally, check with good parameters
+ self.driver.do_setup(None)
+
+ def _generate_vol_info(self, vol_name, vol_id):
+ rand_id = six.text_type(random.randint(10000, 99999))
+ if vol_name:
+ return {'name': 'snap_volume%s' % rand_id,
+ 'volume_name': vol_name,
+ 'id': rand_id,
+ 'volume_id': vol_id,
+ 'volume_size': 10,
+ 'mdisk_grp_name': 'openstack'}
+ else:
+ return {'name': 'test_volume%s' % rand_id,
+ 'size': 10,
+ 'id': '%s' % rand_id,
+ 'volume_type_id': None,
+ 'mdisk_grp_name': 'openstack'}
+
+ def _create_volume(self, **kwargs):
+ vol = testutils.create_volume(self.ctxt, **kwargs)
+ self.driver.create_volume(vol)
+ return vol
+
+ def _delete_volume(self, volume):
+ self.driver.delete_volume(volume)
+ self.db.volume_destroy(self.ctxt, volume['id'])
+
+ def _create_consistencygroup_in_db(self, **kwargs):
+ cg = testutils.create_consistencygroup(self.ctxt, **kwargs)
+ return cg
+
+ def _create_cgsnapshot_in_db(self, cg_id, **kwargs):
+ # Create a cgsnapshot DB record plus one snapshot per member volume.
+ cg_snapshot = testutils.create_cgsnapshot(self.ctxt,
+ consistencygroup_id=cg_id,
+ **kwargs)
+ cg_id = cg_snapshot['consistencygroup_id']
+ volumes = self.db.volume_get_all_by_group(self.ctxt.elevated(), cg_id)
+
+ if not volumes:
+ msg = _("Consistency group is empty. No cgsnapshot "
+ "will be created.")
+ raise exception.InvalidConsistencyGroup(reason=msg)
+
+ for volume in volumes:
+ testutils.create_snapshot(self.ctxt,
+ volume['id'],
+ cg_snapshot.id,
+ cg_snapshot.name,
+ cg_snapshot.id,
+ "creating")
+
+ return cg_snapshot
+
+ def _create_test_vol(self, opts):
+ ctxt = testutils.get_test_admin_context()
+ type_ref = volume_types.create(ctxt, 'testtype', opts)
+ volume = self._generate_vol_info(None, None)
+ type_id = type_ref['id']
+ type_ref = volume_types.get_volume_type(ctxt, type_id)
+ volume['volume_type_id'] = type_id
+ volume['volume_type'] = type_ref
+ self.driver.create_volume(volume)
+
+ attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+ self.driver.delete_volume(volume)
+ volume_types.destroy(ctxt, type_ref['id'])
+ return attrs
+
+ def _get_default_opts(self):
+ opt = {'rsize': 2,
+ 'warning': 0,
+ 'autoexpand': True,
+ 'grainsize': 256,
+ 'compression': False,
+ 'easytier': True,
+ 'protocol': 'iSCSI',
+ 'iogrp': 0,
+ 'qos': None,
+ 'replication': False,
+ 'stretched_cluster': None,
+ 'nofmtdisk': False}
+ return opt
+
+ @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos')
+ @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver,
+ '_get_vdisk_params')
+ def test_storwize_svc_create_volume_with_qos(self, get_vdisk_params,
+ add_vdisk_qos):
+ vol = testutils.create_volume(self.ctxt)
+ fake_opts = self._get_default_opts()
+ # If the qos is empty, chvdisk should not be called
+ # for create_volume.
+ get_vdisk_params.return_value = fake_opts
+ self.driver.create_volume(vol)
+ self._assert_vol_exists(vol['name'], True)
+ self.assertFalse(add_vdisk_qos.called)
+ self.driver.delete_volume(vol)
+
+ # If the qos is not empty, chvdisk should be called
+ # for create_volume.
+ fake_opts['qos'] = {'IOThrottling': 5000}
+ get_vdisk_params.return_value = fake_opts
+ self.driver.create_volume(vol)
+ self._assert_vol_exists(vol['name'], True)
+ add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos'])
+
+ self.driver.delete_volume(vol)
+ self._assert_vol_exists(vol['name'], False)
+
+ def test_storwize_svc_snapshots(self):
+ vol1 = self._create_volume()
+ snap1 = self._generate_vol_info(vol1['name'], vol1['id'])
+
+ # Test timeout and volume cleanup
+ self._set_flag('storwize_svc_flashcopy_timeout', 1)
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.create_snapshot, snap1)
+ self._assert_vol_exists(snap1['name'], False)
+ self._reset_flags()
+
+ # Test prestartfcmap failing
+ with mock.patch.object(
+ storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart:
+ prestart.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.create_snapshot, snap1)
+
+ if self.USESIM:
+ self.sim.error_injection('lsfcmap', 'speed_up')
+ self.sim.error_injection('startfcmap', 'bad_id')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.create_snapshot, snap1)
+ self._assert_vol_exists(snap1['name'], False)
+ self.sim.error_injection('prestartfcmap', 'bad_id')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.create_snapshot, snap1)
+ self._assert_vol_exists(snap1['name'], False)
+
+ # Test successful snapshot
+ self.driver.create_snapshot(snap1)
+ self._assert_vol_exists(snap1['name'], True)
+
+ # Try to create a snapshot from an non-existing volume - should fail
snap_novol = self._generate_vol_info('undefined-vol', '12345')
self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot,
# If the qos is empty, chvdisk should not be called
# for create_volume_from_snapshot.
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
get_vdisk_params.return_value = fake_opts
self.driver.create_volume_from_snapshot(vol2, snap1)
chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no',
'warning': '0', 'autoexpand': 'on',
'grainsize': '32', 'easy_tier': 'off',
- 'IO_group_id': str(test_iogrp)})
+ 'IO_group_id': six.text_type(test_iogrp)})
opts_list.append({'rsize': 2, 'compression': False, 'warning': 80,
'autoexpand': False, 'grainsize': 256,
'easytier': True})
raise
def test_storwize_svc_unicode_host_and_volume_names(self):
- # We'll check with iSCSI only - nothing protocol-dependednt here
- self._set_flag('storwize_svc_connection_protocol', 'iSCSI')
+ # We'll check with iSCSI only - nothing protocol-dependent here
self.driver.do_setup(None)
rand_id = random.randint(10000, 99999)
self.assertIsNotNone(host_name)
self.driver._helpers.delete_host(host_name)
- def test_storwize_svc_validate_connector(self):
- conn_neither = {'host': 'host'}
- conn_iscsi = {'host': 'host', 'initiator': 'foo'}
- conn_fc = {'host': 'host', 'wwpns': 'bar'}
- conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
-
- self.driver._state['enabled_protocols'] = set(['iSCSI'])
- self.driver.validate_connector(conn_iscsi)
- self.driver.validate_connector(conn_both)
- self.assertRaises(exception.InvalidConnectorException,
- self.driver.validate_connector, conn_fc)
- self.assertRaises(exception.InvalidConnectorException,
- self.driver.validate_connector, conn_neither)
-
- self.driver._state['enabled_protocols'] = set(['FC'])
- self.driver.validate_connector(conn_fc)
- self.driver.validate_connector(conn_both)
- self.assertRaises(exception.InvalidConnectorException,
- self.driver.validate_connector, conn_iscsi)
- self.assertRaises(exception.InvalidConnectorException,
- self.driver.validate_connector, conn_neither)
-
- self.driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
- self.driver.validate_connector(conn_iscsi)
- self.driver.validate_connector(conn_fc)
- self.driver.validate_connector(conn_both)
- self.assertRaises(exception.InvalidConnectorException,
- self.driver.validate_connector, conn_neither)
-
- def test_storwize_svc_host_maps(self):
- # Create two volumes to be used in mappings
-
- ctxt = context.get_admin_context()
- volume1 = self._generate_vol_info(None, None)
- self.driver.create_volume(volume1)
- volume2 = self._generate_vol_info(None, None)
- self.driver.create_volume(volume2)
-
- # Create volume types that we created
- types = {}
- for protocol in ['FC', 'iSCSI']:
- opts = {'storage_protocol': '<in> ' + protocol}
- types[protocol] = volume_types.create(ctxt, protocol, opts)
-
- expected = {'FC': {'driver_volume_type': 'fibre_channel',
- 'data': {'target_lun': 0,
- 'target_wwn': ['AABBCCDDEEFF0011'],
- 'target_discovered': False}},
- 'iSCSI': {'driver_volume_type': 'iscsi',
- 'data': {'target_discovered': False,
- 'target_iqn':
- 'iqn.1982-01.com.ibm:1234.sim.node1',
- 'target_portal': '1.234.56.78:3260',
- 'target_lun': 0,
- 'auth_method': 'CHAP',
- 'discovery_auth_method': 'CHAP'}}}
-
- for protocol in ['FC', 'iSCSI']:
- volume1['volume_type_id'] = types[protocol]['id']
- volume2['volume_type_id'] = types[protocol]['id']
-
- # Check case where no hosts exist
- if self.USESIM:
- ret = self.driver._helpers.get_host_from_connector(
- self._connector)
- self.assertIsNone(ret)
-
- # Make sure that the volumes have been created
- self._assert_vol_exists(volume1['name'], True)
- self._assert_vol_exists(volume2['name'], True)
-
- # Initialize connection from the first volume to a host
- ret = self.driver.initialize_connection(volume1, self._connector)
- self.assertEqual(expected[protocol]['driver_volume_type'],
- ret['driver_volume_type'])
- for k, v in expected[protocol]['data'].items():
- self.assertEqual(v, ret['data'][k])
-
- # Initialize again, should notice it and do nothing
- ret = self.driver.initialize_connection(volume1, self._connector)
- self.assertEqual(expected[protocol]['driver_volume_type'],
- ret['driver_volume_type'])
- for k, v in expected[protocol]['data'].items():
- self.assertEqual(v, ret['data'][k])
-
- # Try to delete the 1st volume (should fail because it is mapped)
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.delete_volume,
- volume1)
-
- # Check bad output from lsfabric for the 2nd volume
- if protocol == 'FC' and self.USESIM:
- for error in ['remove_field', 'header_mismatch']:
- self.sim.error_injection('lsfabric', error)
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.initialize_connection,
- volume2, self._connector)
-
- with mock.patch.object(storwize_svc_common.StorwizeHelpers,
- 'get_conn_fc_wwpns') as conn_fc_wwpns:
- conn_fc_wwpns.return_value = []
-
- ret = self.driver.initialize_connection(volume2,
- self._connector)
-
- ret = self.driver.terminate_connection(volume1, self._connector)
-
- if protocol == 'FC' and self.USESIM:
- # For the first volume detach, ret['data'] should be empty
- # only ret['driver_volume_type'] returned
- self.assertEqual({}, ret['data'])
- self.assertEqual('fibre_channel', ret['driver_volume_type'])
- ret = self.driver.terminate_connection(volume2,
- self._connector)
- self.assertEqual('fibre_channel', ret['driver_volume_type'])
- # wwpn is radom created
- self.assertNotEqual({}, ret['data'])
- if self.USESIM:
- ret = self.driver._helpers.get_host_from_connector(
- self._connector)
- self.assertIsNone(ret)
-
- # Check cases with no auth set for host
- if self.USESIM:
- for auth_enabled in [True, False]:
- for host_exists in ['yes-auth', 'yes-noauth', 'no']:
- self._set_flag('storwize_svc_iscsi_chap_enabled',
- auth_enabled)
- case = 'en' + str(auth_enabled) + 'ex' + str(host_exists)
- conn_na = {'initiator': 'test:init:%s' %
- random.randint(10000, 99999),
- 'ip': '11.11.11.11',
- 'host': 'host-%s' % case}
- if host_exists.startswith('yes'):
- self.sim._add_host_to_list(conn_na)
- if host_exists == 'yes-auth':
- kwargs = {'chapsecret': 'foo',
- 'obj': conn_na['host']}
- self.sim._cmd_chhost(**kwargs)
- volume1['volume_type_id'] = types['iSCSI']['id']
-
- init_ret = self.driver.initialize_connection(volume1,
- conn_na)
- host_name = self.sim._host_in_list(conn_na['host'])
- chap_ret = self.driver._helpers.get_chap_secret_for_host(
- host_name)
- if auth_enabled or host_exists == 'yes-auth':
- self.assertIn('auth_password', init_ret['data'])
- self.assertIsNotNone(chap_ret)
- else:
- self.assertNotIn('auth_password', init_ret['data'])
- self.assertIsNone(chap_ret)
- self.driver.terminate_connection(volume1, conn_na)
- self._set_flag('storwize_svc_iscsi_chap_enabled', True)
-
- # Test no preferred node
- if self.USESIM:
- self.sim.error_injection('lsvdisk', 'no_pref_node')
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.initialize_connection,
- volume1, self._connector)
-
- # Initialize connection from the second volume to the host with no
- # preferred node set if in simulation mode, otherwise, just
- # another initialize connection.
- if self.USESIM:
- self.sim.error_injection('lsvdisk', 'blank_pref_node')
- self.driver.initialize_connection(volume2, self._connector)
-
- # Try to remove connection from host that doesn't exist (should fail)
- conn_no_exist = self._connector.copy()
- conn_no_exist['initiator'] = 'i_dont_exist'
- conn_no_exist['wwpns'] = ['0000000000000000']
- self.assertRaises(exception.VolumeDriverException,
- self.driver.terminate_connection,
- volume1,
- conn_no_exist)
-
- # Try to remove connection from volume that isn't mapped (should print
- # message but NOT fail)
- unmapped_vol = self._generate_vol_info(None, None)
- self.driver.create_volume(unmapped_vol)
- self.driver.terminate_connection(unmapped_vol, self._connector)
- self.driver.delete_volume(unmapped_vol)
-
- # Remove the mapping from the 1st volume and delete it
- self.driver.terminate_connection(volume1, self._connector)
- self.driver.delete_volume(volume1)
- self._assert_vol_exists(volume1['name'], False)
-
- # Make sure our host still exists
- host_name = self.driver._helpers.get_host_from_connector(
- self._connector)
- self.assertIsNotNone(host_name)
-
- # Remove the mapping from the 2nd volume. The host should
- # be automatically removed because there are no more mappings.
- self.driver.terminate_connection(volume2, self._connector)
-
- # Check if we successfully terminate connections when the host is not
- # specified (see bug #1244257)
- fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
- self.driver.initialize_connection(volume2, self._connector)
- host_name = self.driver._helpers.get_host_from_connector(
- self._connector)
- self.assertIsNotNone(host_name)
- self.driver.terminate_connection(volume2, fake_conn)
- host_name = self.driver._helpers.get_host_from_connector(
- self._connector)
- self.assertIsNone(host_name)
- self.driver.delete_volume(volume2)
- self._assert_vol_exists(volume2['name'], False)
-
- # Delete volume types that we created
- for protocol in ['FC', 'iSCSI']:
- volume_types.destroy(ctxt, types[protocol]['id'])
-
- # Check if our host still exists (it should not)
- if self.USESIM:
- ret = self.driver._helpers.get_host_from_connector(self._connector)
- self.assertIsNone(ret)
-
- def test_storwize_svc_multi_host_maps(self):
- # We can't test connecting to multiple hosts from a single host when
- # using real storage
- if not self.USESIM:
- return
-
- # Create a volume to be used in mappings
- ctxt = context.get_admin_context()
- volume = self._generate_vol_info(None, None)
- self.driver.create_volume(volume)
-
- # Create volume types for protocols
- types = {}
- for protocol in ['FC', 'iSCSI']:
- opts = {'storage_protocol': '<in> ' + protocol}
- types[protocol] = volume_types.create(ctxt, protocol, opts)
-
- # Create a connector for the second 'host'
- wwpns = [str(random.randint(0, 9999999999999999)).zfill(16),
- str(random.randint(0, 9999999999999999)).zfill(16)]
- initiator = 'test.initiator.%s' % str(random.randint(10000, 99999))
- conn2 = {'ip': '1.234.56.79',
- 'host': 'storwize-svc-test2',
- 'wwpns': wwpns,
- 'initiator': initiator}
-
- for protocol in ['FC', 'iSCSI']:
- volume['volume_type_id'] = types[protocol]['id']
-
- # Make sure that the volume has been created
- self._assert_vol_exists(volume['name'], True)
-
- self.driver.initialize_connection(volume, self._connector)
-
- self._set_flag('storwize_svc_multihostmap_enabled', False)
- self.assertRaises(exception.CinderException,
- self.driver.initialize_connection, volume, conn2)
-
- self._set_flag('storwize_svc_multihostmap_enabled', True)
- self.driver.initialize_connection(volume, conn2)
-
- self.driver.terminate_connection(volume, conn2)
- self.driver.terminate_connection(volume, self._connector)
-
def test_storwize_svc_delete_volume_snapshots(self):
# Create a volume with two snapshots
master = self._create_volume()
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If type_id is none and volume_type is not none, it should work fine.
- params = self.driver._get_vdisk_params(None,
- volume_type=vol_type_qos,
+ params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If both type_id and volume_type are none, no qos will be returned
fake_opts_qos = self._get_default_opts()
fake_opts_qos['qos'] = {'IOThrottling': 5000}
self.driver.create_volume(volume)
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for both the source and target volumes,
# add_vdisk_qos and disable_vdisk_qos will not be called for
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is specified for both source and target volumes,
# add_vdisk_qos will be called for retype, and disable_vdisk_qos
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for source and speficied for target volume,
# add_vdisk_qos will be called for retype, and disable_vdisk_qos
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for target volume and specified for source
# volume, add_vdisk_qos will not be called for retype, and
fake_opts_qos = self._get_default_opts()
fake_opts_qos['qos'] = {'IOThrottling': 5000}
self.driver.create_volume(volume)
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for both the source and target volumes,
# add_vdisk_qos and disable_vdisk_qos will not be called for
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is specified for both source and target volumes,
# add_vdisk_qos will be called for retype, and disable_vdisk_qos
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for source and speficied for target volume,
# add_vdisk_qos will be called for retype, and disable_vdisk_qos
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
- with mock.patch.object(storwize_svc.StorwizeSVCDriver,
+ with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for target volume and specified for source
# volume, add_vdisk_qos will not be called for retype, and
self.driver.delete_volume(volume)
self.assertNotIn(volume['id'], self.driver._vdiskcopyops)
- def test_storwize_get_host_with_fc_connection(self):
- # Create a FC host
- del self._connector['initiator']
- helper = self.driver._helpers
- host_name = helper.create_host(self._connector)
-
- # Remove the first wwpn from connector, and then try get host
- wwpns = self._connector['wwpns']
- wwpns.remove(wwpns[0])
- host_name = helper.get_host_from_connector(self._connector)
-
- self.assertIsNotNone(host_name)
-
- def test_storwize_initiator_multiple_wwpns_connected(self):
-
- # Generate us a test volume
- volume = self._create_volume()
-
- # Fibre Channel volume type
- extra_spec = {'capabilities:storage_protocol': '<in> FC'}
- vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
-
- volume['volume_type_id'] = vol_type['id']
-
- # Make sure that the volumes have been created
- self._assert_vol_exists(volume['name'], True)
-
- # Set up one WWPN that won't match and one that will.
- self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0',
- 'AABBCCDDEEFF0010']
-
- wwpns = ['ff00000000000000', 'ff00000000000001']
- connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
-
- with mock.patch.object(storwize_svc_common.StorwizeHelpers,
- 'get_conn_fc_wwpns') as get_mappings:
- mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002',
- 'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012']
- get_mappings.return_value = mapped_wwpns
-
- # Initialize the connection
- init_ret = self.driver.initialize_connection(volume, connector)
-
- # Make sure we return all wwpns which where mapped as part of the
- # connection
- self.assertEqual(mapped_wwpns,
- init_ret['data']['target_wwn'])
-
- def test_storwize_terminate_connection(self):
- # create a FC volume
- volume_fc = self._create_volume()
- extra_spec = {'capabilities:storage_protocol': '<in> FC'}
- vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec)
- volume_fc['volume_type_id'] = vol_type_fc['id']
-
- # create a iSCSI volume
- volume_iSCSI = self._create_volume()
- extra_spec = {'capabilities:storage_protocol': '<in> iSCSI'}
- vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec)
- volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id']
-
- connector = {'host': 'storwize-svc-host',
- 'wwnns': ['20000090fa17311e', '20000090fa17311f'],
- 'wwpns': ['ff00000000000000', 'ff00000000000001'],
- 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}
-
- self.driver.initialize_connection(volume_fc, connector)
- self.driver.initialize_connection(volume_iSCSI, connector)
- self.driver.terminate_connection(volume_iSCSI, connector)
- self.driver.terminate_connection(volume_fc, connector)
-
- def test_storwize_initiator_target_map(self):
- # Generate us a test volume
- volume = self._create_volume()
-
- # FIbre Channel volume type
- extra_spec = {'capabilities:storage_protocol': '<in> FC'}
- vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
-
- volume['volume_type_id'] = vol_type['id']
-
- # Make sure that the volumes have been created
- self._assert_vol_exists(volume['name'], True)
-
- wwpns = ['ff00000000000000', 'ff00000000000001']
- connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
-
- # Initialise the connection
- init_ret = self.driver.initialize_connection(volume, connector)
-
- # Check that the initiator_target_map is as expected
- init_data = {'driver_volume_type': 'fibre_channel',
- 'data': {'initiator_target_map':
- {'ff00000000000000': ['AABBCCDDEEFF0011'],
- 'ff00000000000001': ['AABBCCDDEEFF0011']},
- 'target_discovered': False,
- 'target_lun': 0,
- 'target_wwn': ['AABBCCDDEEFF0011'],
- 'volume_id': volume['id']
- }
- }
-
- self.assertEqual(init_data, init_ret)
-
- # Terminate connection
- term_ret = self.driver.terminate_connection(volume, connector)
-
- # Check that the initiator_target_map is as expected
- term_data = {'driver_volume_type': 'fibre_channel',
- 'data': {'initiator_target_map':
- {'ff00000000000000': ['AABBCCDDEEFF0011'],
- 'ff00000000000001': ['AABBCCDDEEFF0011']}
- }
- }
-
- self.assertEqual(term_data, term_ret)
-
def test_storwize_create_volume_with_replication_disable(self):
volume = self._generate_vol_info(None, None)
-# Copyright 2013 IBM Corp.
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-"""
-Volume driver for IBM Storwize family and SVC storage systems.
-
-Notes:
-1. If you specify both a password and a key file, this driver will use the
- key file only.
-2. When using a key file for authentication, it is up to the user or
- system administrator to store the private key in a safe manner.
-3. The defaults for creating volumes are "-rsize 2% -autoexpand
- -grainsize 256 -warning 0". These can be changed in the configuration
- file or by using volume types(recommended only for advanced users).
-
-Limitations:
-1. The driver expects CLI output in English, error messages may be in a
- localized format.
-2. Clones and creating volumes from snapshots, where the source and target
- are of different sizes, is not supported.
-
-"""
-
-import math
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_service import loopingcall
-from oslo_utils import excutils
-from oslo_utils import units
-
-from cinder import context
-from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
-from cinder import utils
-from cinder.volume import driver
-from cinder.volume.drivers.ibm.storwize_svc import (
- replication as storwize_rep)
-from cinder.volume.drivers.ibm.storwize_svc import (
- storwize_svc_common as storwize_helpers)
-from cinder.volume.drivers.san import san
-from cinder.volume import volume_types
-from cinder.zonemanager import utils as fczm_utils
-
-LOG = logging.getLogger(__name__)
-
-storwize_svc_opts = [
- cfg.StrOpt('storwize_svc_volpool_name',
- default='volpool',
- help='Storage system storage pool for volumes'),
- cfg.IntOpt('storwize_svc_vol_rsize',
- default=2,
- min=-1, max=100,
- help='Storage system space-efficiency parameter for volumes '
- '(percentage)'),
- cfg.IntOpt('storwize_svc_vol_warning',
- default=0,
- min=-1, max=100,
- help='Storage system threshold for volume capacity warnings '
- '(percentage)'),
- cfg.BoolOpt('storwize_svc_vol_autoexpand',
- default=True,
- help='Storage system autoexpand parameter for volumes '
- '(True/False)'),
- cfg.IntOpt('storwize_svc_vol_grainsize',
- default=256,
- help='Storage system grain size parameter for volumes '
- '(32/64/128/256)'),
- cfg.BoolOpt('storwize_svc_vol_compression',
- default=False,
- help='Storage system compression option for volumes'),
- cfg.BoolOpt('storwize_svc_vol_easytier',
- default=True,
- help='Enable Easy Tier for volumes'),
- cfg.IntOpt('storwize_svc_vol_iogrp',
- default=0,
- help='The I/O group in which to allocate volumes'),
- cfg.IntOpt('storwize_svc_flashcopy_timeout',
- default=120,
- min=1, max=600,
- help='Maximum number of seconds to wait for FlashCopy to be '
- 'prepared.'),
- cfg.StrOpt('storwize_svc_connection_protocol',
- default='iSCSI',
- help='Connection protocol (iSCSI/FC)'),
- cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
- default=True,
- help='Configure CHAP authentication for iSCSI connections '
- '(Default: Enabled)'),
- cfg.BoolOpt('storwize_svc_multipath_enabled',
- default=False,
- help='This option no longer has any affect. It is deprecated '
- 'and will be removed in the next release.',
- deprecated_for_removal=True),
- cfg.BoolOpt('storwize_svc_multihostmap_enabled',
- default=True,
- help='Allows vdisk to multi host mapping'),
- cfg.BoolOpt('storwize_svc_allow_tenant_qos',
- default=False,
- help='Allow tenants to specify QOS on create'),
- cfg.StrOpt('storwize_svc_stretched_cluster_partner',
- help='If operating in stretched cluster mode, specify the '
- 'name of the pool in which mirrored copies are stored.'
- 'Example: "pool2"'),
- cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
- default=False,
- help='Specifies that the volume not be formatted during '
- 'creation.'),
- cfg.IntOpt('storwize_svc_flashcopy_rate',
- default=50,
- min=1, max=100,
- help='Specifies the Storwize FlashCopy copy rate to be used '
- 'when creating a full volume copy. The default is rate '
- 'is 50, and the valid rates are 1-100.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(storwize_svc_opts)
-
-
-class StorwizeSVCDriver(san.SanDriver,
- driver.ManageableVD,
- driver.ExtendVD, driver.SnapshotVD,
- driver.MigrateVD, driver.ReplicaVD,
- driver.ConsistencyGroupVD,
- driver.CloneableImageVD, driver.TransferVD):
- """IBM Storwize V7000 and SVC iSCSI/FC volume driver.
-
- Version history:
- 1.0 - Initial driver
- 1.1 - FC support, create_cloned_volume, volume type support,
- get_volume_stats, minor bug fixes
- 1.2.0 - Added retype
- 1.2.1 - Code refactor, improved exception handling
- 1.2.2 - Fix bug #1274123 (races in host-related functions)
- 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
- lsfabric, clear unused data from connections, ensure matching
- WWPNs by comparing lower case
- 1.2.4 - Fix bug #1278035 (async migration/retype)
- 1.2.5 - Added support for manage_existing (unmanage is inherited)
- 1.2.6 - Added QoS support in terms of I/O throttling rate
- 1.3.1 - Added support for volume replication
- 1.3.2 - Added support for consistency group
- 1.3.3 - Update driver to use ABC metaclasses
- """
-
- VERSION = "1.3.3"
- VDISKCOPYOPS_INTERVAL = 600
-
- def __init__(self, *args, **kwargs):
- super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(storwize_svc_opts)
- self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
- self._vdiskcopyops = {}
- self._vdiskcopyops_loop = None
- self.replication = None
- self._state = {'storage_nodes': {},
- 'enabled_protocols': set(),
- 'compression_enabled': False,
- 'available_iogrps': [],
- 'system_name': None,
- 'system_id': None,
- 'code_level': None,
- }
- # Storwize has the limitation that can not burst more than 3 new ssh
- # connections within 1 second. So slow down the initialization.
- time.sleep(1)
-
- def do_setup(self, ctxt):
- """Check that we have all configuration details from the storage."""
- LOG.debug('enter: do_setup')
-
- # Get storage system name, id, and code level
- self._state.update(self._helpers.get_system_info())
-
- # Get the replication helpers
- self.replication = storwize_rep.StorwizeSVCReplication.factory(self)
-
- # Validate that the pool exists
- pool = self.configuration.storwize_svc_volpool_name
- try:
- self._helpers.get_pool_attrs(pool)
- except exception.VolumeBackendAPIException:
- msg = _('Failed getting details for pool %s.') % pool
- raise exception.InvalidInput(reason=msg)
-
- # Check if compression is supported
- self._state['compression_enabled'] = (self._helpers.
- compression_enabled())
-
- # Get the available I/O groups
- self._state['available_iogrps'] = (self._helpers.
- get_available_io_groups())
-
- # Get the iSCSI and FC names of the Storwize/SVC nodes
- self._state['storage_nodes'] = self._helpers.get_node_info()
-
- # Add the iSCSI IP addresses and WWPNs to the storage node info
- self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
- self._helpers.add_fc_wwpns(self._state['storage_nodes'])
-
- # For each node, check what connection modes it supports. Delete any
- # nodes that do not support any types (may be partially configured).
- to_delete = []
- for k, node in self._state['storage_nodes'].items():
- if ((len(node['ipv4']) or len(node['ipv6']))
- and len(node['iscsi_name'])):
- node['enabled_protocols'].append('iSCSI')
- self._state['enabled_protocols'].add('iSCSI')
- if len(node['WWPN']):
- node['enabled_protocols'].append('FC')
- self._state['enabled_protocols'].add('FC')
- if not len(node['enabled_protocols']):
- to_delete.append(k)
- for delkey in to_delete:
- del self._state['storage_nodes'][delkey]
-
- # Make sure we have at least one node configured
- if not len(self._state['storage_nodes']):
- msg = _('do_setup: No configured nodes.')
- LOG.error(msg)
- raise exception.VolumeDriverException(message=msg)
-
- # Build the list of in-progress vdisk copy operations
- if ctxt is None:
- admin_context = context.get_admin_context()
- else:
- admin_context = ctxt.elevated()
- volumes = self.db.volume_get_all_by_host(admin_context, self.host)
-
- for volume in volumes:
- metadata = self.db.volume_admin_metadata_get(admin_context,
- volume['id'])
- curr_ops = metadata.get('vdiskcopyops', None)
- if curr_ops:
- ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
- self._vdiskcopyops[volume['id']] = ops
-
- # if vdiskcopy exists in database, start the looping call
- if len(self._vdiskcopyops) >= 1:
- self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
- self._check_volume_copy_ops)
- self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
-
- LOG.debug('leave: do_setup')
-
- def check_for_setup_error(self):
- """Ensure that the flags are set properly."""
- LOG.debug('enter: check_for_setup_error')
-
- # Check that we have the system ID information
- if self._state['system_name'] is None:
- exception_msg = (_('Unable to determine system name.'))
- raise exception.VolumeBackendAPIException(data=exception_msg)
- if self._state['system_id'] is None:
- exception_msg = (_('Unable to determine system id.'))
- raise exception.VolumeBackendAPIException(data=exception_msg)
-
- required_flags = ['san_ip', 'san_ssh_port', 'san_login',
- 'storwize_svc_volpool_name']
- for flag in required_flags:
- if not self.configuration.safe_get(flag):
- raise exception.InvalidInput(reason=_('%s is not set.') % flag)
-
- # Ensure that either password or keyfile were set
- if not (self.configuration.san_password or
- self.configuration.san_private_key):
- raise exception.InvalidInput(
- reason=_('Password or SSH private key is required for '
- 'authentication: set either san_password or '
- 'san_private_key option.'))
-
- opts = self._helpers.build_default_opts(self.configuration)
- self._helpers.check_vdisk_opts(self._state, opts)
-
- LOG.debug('leave: check_for_setup_error')
-
- def ensure_export(self, ctxt, volume):
- """Check that the volume exists on the storage.
-
- The system does not "export" volumes as a Linux iSCSI target does,
- and therefore we just check that the volume exists on the storage.
- """
- volume_defined = self._helpers.is_vdisk_defined(volume['name'])
- if not volume_defined:
- LOG.error(_LE('ensure_export: Volume %s not found on storage.'),
- volume['name'])
-
- def create_export(self, ctxt, volume, connector):
- model_update = None
- return model_update
-
- def remove_export(self, ctxt, volume):
- pass
-
- def validate_connector(self, connector):
- """Check connector for at least one enabled protocol (iSCSI/FC)."""
- valid = False
- if ('iSCSI' in self._state['enabled_protocols'] and
- 'initiator' in connector):
- valid = True
- if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
- valid = True
- if not valid:
- LOG.error(_LE('The connector does not contain the required '
- 'information.'))
- raise exception.InvalidConnectorException(
- missing='initiator or wwpns')
-
- def _get_vdisk_params(self, type_id, volume_type=None,
- volume_metadata=None):
- return self._helpers.get_vdisk_params(self.configuration, self._state,
- type_id, volume_type=volume_type,
- volume_metadata=volume_metadata)
-
- @fczm_utils.AddFCZone
- @utils.synchronized('storwize-host', external=True)
- def initialize_connection(self, volume, connector):
- """Perform necessary work to make an iSCSI/FC connection.
-
- To be able to create an iSCSI/FC connection from a given host to a
- volume, we must:
- 1. Translate the given iSCSI name or WWNN to a host name
- 2. Create new host on the storage system if it does not yet exist
- 3. Map the volume to the host if it is not already done
- 4. Return the connection information for relevant nodes (in the
- proper I/O group)
-
- """
-
- LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
- ' %(conn)s', {'vol': volume['id'], 'conn': connector})
-
- vol_opts = self._get_vdisk_params(volume['volume_type_id'])
- volume_name = volume['name']
-
- # Delete irrelevant connection information that later could result
- # in unwanted behaviour. For example, if FC is used yet the hosts
- # return iSCSI data, the driver will try to create the iSCSI connection
- # which can result in a nice error about reaching the per-host maximum
- # iSCSI initiator limit.
- # First make a copy so we don't mess with a caller's connector.
- connector = connector.copy()
- if vol_opts['protocol'] == 'FC':
- connector.pop('initiator', None)
- elif vol_opts['protocol'] == 'iSCSI':
- connector.pop('wwnns', None)
- connector.pop('wwpns', None)
-
- # Check if a host object is defined for this host name
- host_name = self._helpers.get_host_from_connector(connector)
- if host_name is None:
- # Host does not exist - add a new host to Storwize/SVC
- host_name = self._helpers.create_host(connector)
-
- if vol_opts['protocol'] == 'iSCSI':
- chap_secret = self._helpers.get_chap_secret_for_host(host_name)
- chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
- if chap_enabled and chap_secret is None:
- chap_secret = self._helpers.add_chap_secret_to_host(host_name)
- elif not chap_enabled and chap_secret:
- LOG.warning(_LW('CHAP secret exists for host but CHAP is '
- 'disabled.'))
-
- volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
- if volume_attributes is None:
- msg = (_('initialize_connection: Failed to get attributes'
- ' for volume %s.') % volume_name)
- LOG.error(msg)
- raise exception.VolumeDriverException(message=msg)
-
- multihostmap = self.configuration.storwize_svc_multihostmap_enabled
- lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
- multihostmap)
- try:
- preferred_node = volume_attributes['preferred_node_id']
- IO_group = volume_attributes['IO_group_id']
- except KeyError as e:
- LOG.error(_LE('Did not find expected column name in '
- 'lsvdisk: %s.'), e)
- raise exception.VolumeBackendAPIException(
- data=_('initialize_connection: Missing volume attribute for '
- 'volume %s.') % volume_name)
-
- try:
- # Get preferred node and other nodes in I/O group
- preferred_node_entry = None
- io_group_nodes = []
- for node in self._state['storage_nodes'].values():
- if vol_opts['protocol'] not in node['enabled_protocols']:
- continue
- if node['id'] == preferred_node:
- preferred_node_entry = node
- if node['IO_group'] == IO_group:
- io_group_nodes.append(node)
-
- if not len(io_group_nodes):
- msg = (_('initialize_connection: No node found in '
- 'I/O group %(gid)s for volume %(vol)s.') %
- {'gid': IO_group, 'vol': volume_name})
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
-
- if not preferred_node_entry:
- # Get 1st node in I/O group
- preferred_node_entry = io_group_nodes[0]
- LOG.warning(_LW('initialize_connection: Did not find a '
- 'preferred node for volume %s.'), volume_name)
-
- properties = {}
- properties['target_discovered'] = False
- properties['target_lun'] = lun_id
- properties['volume_id'] = volume['id']
- if vol_opts['protocol'] == 'iSCSI':
- type_str = 'iscsi'
- if len(preferred_node_entry['ipv4']):
- ipaddr = preferred_node_entry['ipv4'][0]
- else:
- ipaddr = preferred_node_entry['ipv6'][0]
- properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
- properties['target_iqn'] = preferred_node_entry['iscsi_name']
- if chap_secret:
- properties['auth_method'] = 'CHAP'
- properties['auth_username'] = connector['initiator']
- properties['auth_password'] = chap_secret
- properties['discovery_auth_method'] = 'CHAP'
- properties['discovery_auth_username'] = (
- connector['initiator'])
- properties['discovery_auth_password'] = chap_secret
- else:
- type_str = 'fibre_channel'
- conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
-
- # If conn_wwpns is empty, then that means that there were
- # no target ports with visibility to any of the initiators
- # so we return all target ports.
- if len(conn_wwpns) == 0:
- for node in self._state['storage_nodes'].values():
- conn_wwpns.extend(node['WWPN'])
-
- properties['target_wwn'] = conn_wwpns
-
- i_t_map = self._make_initiator_target_map(connector['wwpns'],
- conn_wwpns)
- properties['initiator_target_map'] = i_t_map
-
- # specific for z/VM, refer to cinder bug 1323993
- if "zvm_fcp" in connector:
- properties['zvm_fcp'] = connector['zvm_fcp']
- except Exception:
- with excutils.save_and_reraise_exception():
- self.terminate_connection(volume, connector)
- LOG.error(_LE('initialize_connection: Failed '
- 'to collect return '
- 'properties for volume %(vol)s and connector '
- '%(conn)s.\n'), {'vol': volume,
- 'conn': connector})
-
- LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
- 'connector %(conn)s\n properties: %(prop)s',
- {'vol': volume['id'], 'conn': connector,
- 'prop': properties})
-
- return {'driver_volume_type': type_str, 'data': properties, }
-
- def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
- """Build a simplistic all-to-all mapping."""
- i_t_map = {}
- for i_wwpn in initiator_wwpns:
- i_t_map[str(i_wwpn)] = []
- for t_wwpn in target_wwpns:
- i_t_map[i_wwpn].append(t_wwpn)
-
- return i_t_map
-
- @fczm_utils.RemoveFCZone
- @utils.synchronized('storwize-host', external=True)
- def terminate_connection(self, volume, connector, **kwargs):
- """Cleanup after an iSCSI connection has been terminated.
-
- When we clean up a terminated connection between a given connector
- and volume, we:
- 1. Translate the given connector to a host name
- 2. Remove the volume-to-host mapping if it exists
- 3. Delete the host if it has no more mappings (hosts are created
- automatically by this driver when mappings are created)
- """
- LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
- ' %(conn)s', {'vol': volume['id'], 'conn': connector})
- vol_name = volume['name']
- info = {}
- if 'host' in connector:
- # maybe two hosts on the storage, one is for FC and the other for
- # iSCSI, so get host according to protocol
- vol_opts = self._get_vdisk_params(volume['volume_type_id'])
- connector = connector.copy()
- if vol_opts['protocol'] == 'FC':
- connector.pop('initiator', None)
- info = {'driver_volume_type': 'fibre_channel',
- 'data': {}}
- elif vol_opts['protocol'] == 'iSCSI':
- connector.pop('wwnns', None)
- connector.pop('wwpns', None)
- info = {'driver_volume_type': 'iscsi',
- 'data': {}}
-
- host_name = self._helpers.get_host_from_connector(connector)
- if host_name is None:
- msg = (_('terminate_connection: Failed to get host name from'
- ' connector.'))
- LOG.error(msg)
- raise exception.VolumeDriverException(message=msg)
- else:
- # See bug #1244257
- host_name = None
-
- # Unmap volumes, if hostname is None, need to get value from vdiskmap
- host_name = self._helpers.unmap_vol_from_host(vol_name, host_name)
-
- # Host_name could be none
- if host_name:
- resp = self._helpers.check_host_mapped_vols(host_name)
- if not len(resp):
- LOG.info(_LI("Need to remove FC Zone, building initiator "
- "target map."))
- # Build info data structure for zone removing
- if 'wwpns' in connector and host_name:
- target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
- init_targ_map = (self._make_initiator_target_map
- (connector['wwpns'],
- target_wwpns))
- info['data'] = {'initiator_target_map': init_targ_map}
- # No volume mapped to the host, delete host from array
- self._helpers.delete_host(host_name)
-
- LOG.debug('leave: terminate_connection: volume %(vol)s with '
- 'connector %(conn)s', {'vol': volume['id'],
- 'conn': connector})
- return info
-
- def create_volume(self, volume):
- opts = self._get_vdisk_params(volume['volume_type_id'],
- volume_metadata=
- volume.get('volume_metadata'))
- pool = self.configuration.storwize_svc_volpool_name
- self._helpers.create_vdisk(volume['name'], str(volume['size']),
- 'gb', pool, opts)
- if opts['qos']:
- self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
-
- model_update = None
- if 'replication' in opts and opts['replication']:
- ctxt = context.get_admin_context()
- model_update = self.replication.create_replica(ctxt, volume)
- return model_update
-
- def delete_volume(self, volume):
- self._helpers.delete_vdisk(volume['name'], False)
-
- if volume['id'] in self._vdiskcopyops:
- del self._vdiskcopyops[volume['id']]
-
- if not len(self._vdiskcopyops):
- self._vdiskcopyops_loop.stop()
- self._vdiskcopyops_loop = None
-
- def create_snapshot(self, snapshot):
- ctxt = context.get_admin_context()
- try:
- source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
- except Exception:
- msg = (_('create_snapshot: get source volume failed.'))
- LOG.error(msg)
- raise exception.VolumeDriverException(message=msg)
- opts = self._get_vdisk_params(source_vol['volume_type_id'])
- self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
- snapshot['volume_id'], self.configuration,
- opts, False)
-
- def delete_snapshot(self, snapshot):
- self._helpers.delete_vdisk(snapshot['name'], False)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- if volume['size'] != snapshot['volume_size']:
- msg = (_('create_volume_from_snapshot: Source and destination '
- 'size differ.'))
- LOG.error(msg)
- raise exception.InvalidInput(message=msg)
-
- opts = self._get_vdisk_params(volume['volume_type_id'],
- volume_metadata=
- volume.get('volume_metadata'))
- self._helpers.create_copy(snapshot['name'], volume['name'],
- snapshot['id'], self.configuration,
- opts, True)
- if opts['qos']:
- self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
-
- if 'replication' in opts and opts['replication']:
- ctxt = context.get_admin_context()
- replica_status = self.replication.create_replica(ctxt, volume)
- if replica_status:
- return replica_status
-
- def create_cloned_volume(self, tgt_volume, src_volume):
- if src_volume['size'] != tgt_volume['size']:
- msg = (_('create_cloned_volume: Source and destination '
- 'size differ.'))
- LOG.error(msg)
- raise exception.InvalidInput(message=msg)
-
- opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
- volume_metadata=
- tgt_volume.get('volume_metadata'))
- self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
- src_volume['id'], self.configuration,
- opts, True)
- if opts['qos']:
- self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
-
- if 'replication' in opts and opts['replication']:
- ctxt = context.get_admin_context()
- replica_status = self.replication.create_replica(ctxt, tgt_volume)
- if replica_status:
- return replica_status
-
- def extend_volume(self, volume, new_size):
- LOG.debug('enter: extend_volume: volume %s', volume['id'])
- ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
- allow_snaps=False)
- if not ret:
- msg = (_('extend_volume: Extending a volume with snapshots is not '
- 'supported.'))
- LOG.error(msg)
- raise exception.VolumeDriverException(message=msg)
-
- extend_amt = int(new_size) - volume['size']
- self._helpers.extend_vdisk(volume['name'], extend_amt)
- LOG.debug('leave: extend_volume: volume %s', volume['id'])
-
- def add_vdisk_copy(self, volume, dest_pool, vol_type):
- return self._helpers.add_vdisk_copy(volume, dest_pool,
- vol_type, self._state,
- self.configuration)
-
- def _add_vdisk_copy_op(self, ctxt, volume, new_op):
- metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
- volume['id'])
- curr_ops = metadata.get('vdiskcopyops', None)
- if curr_ops:
- curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
- new_ops_list = curr_ops_list.append(new_op)
- else:
- new_ops_list = [new_op]
- new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
- self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
- {'vdiskcopyops': new_ops_str},
- False)
- if volume['id'] in self._vdiskcopyops:
- self._vdiskcopyops[volume['id']].append(new_op)
- else:
- self._vdiskcopyops[volume['id']] = [new_op]
-
- # We added the first copy operation, so start the looping call
- if len(self._vdiskcopyops) == 1:
- self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
- self._check_volume_copy_ops)
- self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
-
- def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
- try:
- self._vdiskcopyops[volume['id']].remove((orig_copy_id,
- new_copy_id))
- if not len(self._vdiskcopyops[volume['id']]):
- del self._vdiskcopyops[volume['id']]
- if not len(self._vdiskcopyops):
- self._vdiskcopyops_loop.stop()
- self._vdiskcopyops_loop = None
- except KeyError:
- LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
- 'registered vdisk copy operations.'), volume['id'])
- return
- except ValueError:
- LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
- 'the specified vdisk copy operation: orig=%(orig)s '
- 'new=%(new)s.'),
- {'vol': volume['id'], 'orig': orig_copy_id,
- 'new': new_copy_id})
- return
-
- metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
- volume['id'])
- curr_ops = metadata.get('vdiskcopyops', None)
- if not curr_ops:
- LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
- 'have any registered vdisk copy operations.'),
- volume['id'])
- return
- curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
- try:
- curr_ops_list.remove((orig_copy_id, new_copy_id))
- except ValueError:
- LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
- 'not have the specified vdisk copy operation: '
- 'orig=%(orig)s new=%(new)s.'),
- {'vol': volume['id'], 'orig': orig_copy_id,
- 'new': new_copy_id})
- return
-
- if len(curr_ops_list):
- new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
- self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
- {'vdiskcopyops': new_ops_str},
- False)
- else:
- self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
- 'vdiskcopyops')
-
- def promote_replica(self, ctxt, volume):
- return self.replication.promote_replica(volume)
-
- def reenable_replication(self, ctxt, volume):
- return self.replication.reenable_replication(volume)
-
- def create_replica_test_volume(self, tgt_volume, src_volume):
- if src_volume['size'] != tgt_volume['size']:
- msg = (_('create_cloned_volume: Source and destination '
- 'size differ.'))
- LOG.error(msg)
- raise exception.InvalidInput(message=msg)
- replica_status = self.replication.test_replica(tgt_volume,
- src_volume)
- return replica_status
-
- def get_replication_status(self, ctxt, volume):
- replica_status = None
- if self.replication:
- replica_status = self.replication.get_replication_status(volume)
- return replica_status
-
- def _check_volume_copy_ops(self):
- LOG.debug("Enter: update volume copy status.")
- ctxt = context.get_admin_context()
- copy_items = list(self._vdiskcopyops.items())
- for vol_id, copy_ops in copy_items:
- try:
- volume = self.db.volume_get(ctxt, vol_id)
- except Exception:
- LOG.warning(_LW('Volume %s does not exist.'), vol_id)
- del self._vdiskcopyops[vol_id]
- if not len(self._vdiskcopyops):
- self._vdiskcopyops_loop.stop()
- self._vdiskcopyops_loop = None
- continue
-
- for copy_op in copy_ops:
- try:
- synced = self._helpers.is_vdisk_copy_synced(volume['name'],
- copy_op[1])
- except Exception:
- LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
- 'not have the specified vdisk copy '
- 'operation: orig=%(orig)s new=%(new)s.'),
- {'vol': volume['id'], 'orig': copy_op[0],
- 'new': copy_op[1]})
- else:
- if synced:
- self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
- self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
- copy_op[1])
- LOG.debug("Exit: update volume copy status.")
-
- def migrate_volume(self, ctxt, volume, host):
- """Migrate directly if source and dest are managed by same storage.
-
- We create a new vdisk copy in the desired pool, and add the original
- vdisk copy to the admin_metadata of the volume to be deleted. The
- deletion will occur using a periodic task once the new copy is synced.
-
- :param ctxt: Context
- :param volume: A dictionary describing the volume to migrate
- :param host: A dictionary describing the host to migrate to, where
- host['host'] is its name, and host['capabilities'] is a
- dictionary of its reported capabilities.
- """
- LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
- {'id': volume['id'], 'host': host['host']})
-
- false_ret = (False, None)
- dest_pool = self._helpers.can_migrate_to_host(host, self._state)
- if dest_pool is None:
- return false_ret
-
- ctxt = context.get_admin_context()
- if volume['volume_type_id'] is not None:
- volume_type_id = volume['volume_type_id']
- vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
- else:
- vol_type = None
-
- self._check_volume_copy_ops()
- new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
- self._add_vdisk_copy_op(ctxt, volume, new_op)
- LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
- {'id': volume['id'], 'host': host['host']})
- return (True, None)
-
- def retype(self, ctxt, volume, new_type, diff, host):
- """Convert the volume to be of the new type.
-
- Returns a boolean indicating whether the retype occurred.
-
- :param ctxt: Context
- :param volume: A dictionary describing the volume to migrate
- :param new_type: A dictionary describing the volume type to convert to
- :param diff: A dictionary with the difference between the two types
- :param host: A dictionary describing the host to migrate to, where
- host['host'] is its name, and host['capabilities'] is a
- dictionary of its reported capabilities.
- """
- def retype_iogrp_property(volume, new, old):
- if new != old:
- self._helpers.change_vdisk_iogrp(volume['name'],
- self._state, (new, old))
-
- LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host})
-
- ignore_keys = ['protocol']
- no_copy_keys = ['warning', 'autoexpand', 'easytier']
- copy_keys = ['rsize', 'grainsize', 'compression']
- all_keys = ignore_keys + no_copy_keys + copy_keys
- old_opts = self._get_vdisk_params(volume['volume_type_id'],
- volume_metadata=
- volume.get('volume_matadata'))
- new_opts = self._get_vdisk_params(new_type['id'],
- volume_type=new_type)
-
- # Check if retype affects volume replication
- model_update = None
- old_type_replication = old_opts.get('replication', False)
- new_type_replication = new_opts.get('replication', False)
-
- # Delete replica if needed
- if old_type_replication and not new_type_replication:
- self.replication.delete_replica(volume)
- model_update = {'replication_status': 'disabled',
- 'replication_driver_data': None,
- 'replication_extended_status': None}
-
- vdisk_changes = []
- need_copy = False
- for key in all_keys:
- if old_opts[key] != new_opts[key]:
- if key in copy_keys:
- need_copy = True
- break
- elif key in no_copy_keys:
- vdisk_changes.append(key)
-
- dest_location = host['capabilities'].get('location_info')
- if self._stats['location_info'] != dest_location:
- need_copy = True
-
- if need_copy:
- self._check_volume_copy_ops()
- dest_pool = self._helpers.can_migrate_to_host(host, self._state)
- if dest_pool is None:
- return False
-
- # If volume is replicated, can't copy
- if new_type_replication:
- msg = (_('Unable to retype: Current action needs volume-copy,'
- ' it is not allowed when new type is replication.'
- ' Volume = %s'), volume['id'])
- raise exception.VolumeDriverException(message=msg)
-
- retype_iogrp_property(volume,
- new_opts['iogrp'],
- old_opts['iogrp'])
- try:
- new_op = self.add_vdisk_copy(volume['name'],
- dest_pool,
- new_type)
- self._add_vdisk_copy_op(ctxt, volume, new_op)
- except exception.VolumeDriverException:
- # roll back changing iogrp property
- retype_iogrp_property(volume, old_opts['iogrp'],
- new_opts['iogrp'])
- msg = (_('Unable to retype: A copy of volume %s exists. '
- 'Retyping would exceed the limit of 2 copies.'),
- volume['id'])
- raise exception.VolumeDriverException(message=msg)
- else:
- retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
-
- self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
- new_opts, self._state)
-
- if new_opts['qos']:
- # Add the new QoS setting to the volume. If the volume has an
- # old QoS setting, it will be overwritten.
- self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
- elif old_opts['qos']:
- # If the old_opts contain QoS keys, disable them.
- self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
-
- # Add replica if needed
- if not old_type_replication and new_type_replication:
- model_update = self.replication.create_replica(ctxt, volume,
- new_type)
-
- LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host['host']})
- return True, model_update
-
- def update_migrated_volume(self, ctxt, volume, new_volume,
- original_volume_status):
- """Return model update from Storwize for migrated volume.
-
- This method should rename the back-end volume name(id) on the
- destination host back to its original name(id) on the source host.
-
- :param ctxt: The context used to run the method update_migrated_volume
- :param volume: The original volume that was migrated to this backend
- :param new_volume: The migration volume object that was created on
- this backend as part of the migration process
- :param original_volume_status: The status of the original volume
- :returns: model_update to update DB with any needed changes
- """
- current_name = CONF.volume_name_template % new_volume['id']
- original_volume_name = CONF.volume_name_template % volume['id']
- try:
- self._helpers.rename_vdisk(current_name, original_volume_name)
- except exception.VolumeBackendAPIException:
- LOG.error(_LE('Unable to rename the logical volume '
- 'for volume: %s'), volume['id'])
- return {'_name_id': new_volume['_name_id'] or new_volume['id']}
- # If the back-end name(id) for the volume has been renamed,
- # it is OK for the volume to keep the original name(id) and there is
- # no need to use the column "_name_id" to establish the mapping
- # relationship between the volume id and the back-end volume
- # name(id).
- # Set the key "_name_id" to None for a successful rename.
- model_update = {'_name_id': None}
- return model_update
-
- def manage_existing(self, volume, ref):
- """Manages an existing vdisk.
-
- Renames the vdisk to match the expected name for the volume.
- Error checking done by manage_existing_get_size is not repeated -
- if we got here then we have a vdisk that isn't in use (or we don't
- care if it is in use.
- """
- vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
- if vdisk is None:
- reason = (_('No vdisk with the UID specified by source-id %s.')
- % ref['source-id'])
- raise exception.ManageExistingInvalidReference(existing_ref=ref,
- reason=reason)
- self._helpers.rename_vdisk(vdisk['name'], volume['name'])
-
- def manage_existing_get_size(self, volume, ref):
- """Return size of an existing Vdisk for manage_existing.
-
- existing_ref is a dictionary of the form:
- {'source-id': <uid of disk>}
-
- Optional elements are:
- 'manage_if_in_use': True/False (default is False)
- If set to True, a volume will be managed even if it is currently
- attached to a host system.
- """
-
- # Check that the reference is valid
- if 'source-id' not in ref:
- reason = _('Reference must contain source-id element.')
- raise exception.ManageExistingInvalidReference(existing_ref=ref,
- reason=reason)
-
- # Check for existence of the vdisk
- vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
- if vdisk is None:
- reason = (_('No vdisk with the UID specified by source-id %s.')
- % (ref['source-id']))
- raise exception.ManageExistingInvalidReference(existing_ref=ref,
- reason=reason)
-
- # Check if the disk is in use, if we need to.
- manage_if_in_use = ref.get('manage_if_in_use', False)
- if (not manage_if_in_use and
- self._helpers.is_vdisk_in_use(vdisk['name'])):
- reason = _('The specified vdisk is mapped to a host.')
- raise exception.ManageExistingInvalidReference(existing_ref=ref,
- reason=reason)
-
- return int(math.ceil(float(vdisk['capacity']) / units.Gi))
-
- def unmanage(self, volume):
- """Remove the specified volume from Cinder management."""
- pass
-
- def get_volume_stats(self, refresh=False):
- """Get volume stats.
-
- If we haven't gotten stats yet or 'refresh' is True,
- run update the stats first.
- """
- if not self._stats or refresh:
- self._update_volume_stats()
-
- return self._stats
-
- def create_consistencygroup(self, context, group):
- """Create a consistency group.
-
- IBM Storwize will create CG until cg-snapshot creation,
- db will maintain the volumes and CG relationship.
- """
- LOG.debug("Creating consistency group.")
- model_update = {'status': 'available'}
- return model_update
-
- def delete_consistencygroup(self, context, group, volumes):
- """Deletes a consistency group.
-
- IBM Storwize will delete the volumes of the CG.
- """
- LOG.debug("Deleting consistency group.")
- model_update = {}
- model_update['status'] = 'deleted'
- volumes = self.db.volume_get_all_by_group(context, group['id'])
-
- for volume in volumes:
- try:
- self._helpers.delete_vdisk(volume['name'], True)
- volume['status'] = 'deleted'
- except exception.VolumeBackendAPIException as err:
- volume['status'] = 'error_deleting'
- if model_update['status'] != 'error_deleting':
- model_update['status'] = 'error_deleting'
- LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
- "Exception: %(exception)s."),
- {'vol': volume['name'], 'exception': err})
- return model_update, volumes
-
- def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
- """Creates a cgsnapshot."""
- # Use cgsnapshot id as cg name
- cg_name = 'cg_snap-' + cgsnapshot['id']
- # Create new cg as cg_snapshot
- self._helpers.create_fc_consistgrp(cg_name)
-
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(
- ctxt, cgsnapshot['id'])
- timeout = self.configuration.storwize_svc_flashcopy_timeout
-
- model_update, snapshots_model = (
- self._helpers.run_consistgrp_snapshots(cg_name,
- snapshots,
- self._state,
- self.configuration,
- timeout))
-
- return model_update, snapshots_model
-
- def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
- """Deletes a cgsnapshot."""
- cgsnapshot_id = cgsnapshot['id']
- cg_name = 'cg_snap-' + cgsnapshot_id
-
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
- cgsnapshot_id)
-
- model_update, snapshots_model = (
- self._helpers.delete_consistgrp_snapshots(cg_name,
- snapshots))
-
- return model_update, snapshots_model
-
- def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
-
- LOG.debug("Updating volume stats.")
- data = {}
-
- data['vendor_name'] = 'IBM'
- data['driver_version'] = self.VERSION
- data['storage_protocol'] = list(self._state['enabled_protocols'])
-
- data['total_capacity_gb'] = 0 # To be overwritten
- data['free_capacity_gb'] = 0 # To be overwritten
- data['reserved_percentage'] = self.configuration.reserved_percentage
- data['multiattach'] = (self.configuration.
- storwize_svc_multihostmap_enabled)
- data['QoS_support'] = True
- data['consistencygroup_support'] = True
-
- pool = self.configuration.storwize_svc_volpool_name
- backend_name = self.configuration.safe_get('volume_backend_name')
- if not backend_name:
- backend_name = '%s_%s' % (self._state['system_name'], pool)
- data['volume_backend_name'] = backend_name
-
- attributes = self._helpers.get_pool_attrs(pool)
- if not attributes:
- LOG.error(_LE('Could not get pool data from the storage.'))
- exception_message = (_('_update_volume_stats: '
- 'Could not get storage pool data.'))
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- data['total_capacity_gb'] = (float(attributes['capacity']) /
- units.Gi)
- data['free_capacity_gb'] = (float(attributes['free_capacity']) /
- units.Gi)
- data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
- data['compression_support'] = self._state['compression_enabled']
- data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
- {'sys_id': self._state['system_id'],
- 'pool': pool})
-
- if self.replication:
- data.update(self.replication.get_replication_info())
-
- self._stats = data
-# Copyright 2014 IBM Corp.
+# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# under the License.
#
+import math
import random
import re
import time
import unicodedata
-
from eventlet import greenthread
from oslo_concurrency import processutils
+from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
+from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
+from cinder.volume import driver
+from cinder.volume.drivers.ibm.storwize_svc import (
+ replication as storwize_rep)
+from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
+
INTERVAL_1_SEC = 1
DEFAULT_TIMEOUT = 15
LOG = logging.getLogger(__name__)
+storwize_svc_opts = [
+ cfg.StrOpt('storwize_svc_volpool_name',
+ default='volpool',
+ help='Storage system storage pool for volumes'),
+ cfg.IntOpt('storwize_svc_vol_rsize',
+ default=2,
+ min=-1, max=100,
+ help='Storage system space-efficiency parameter for volumes '
+ '(percentage)'),
+ cfg.IntOpt('storwize_svc_vol_warning',
+ default=0,
+ min=-1, max=100,
+ help='Storage system threshold for volume capacity warnings '
+ '(percentage)'),
+ cfg.BoolOpt('storwize_svc_vol_autoexpand',
+ default=True,
+ help='Storage system autoexpand parameter for volumes '
+ '(True/False)'),
+ cfg.IntOpt('storwize_svc_vol_grainsize',
+ default=256,
+ help='Storage system grain size parameter for volumes '
+ '(32/64/128/256)'),
+ cfg.BoolOpt('storwize_svc_vol_compression',
+ default=False,
+ help='Storage system compression option for volumes'),
+ cfg.BoolOpt('storwize_svc_vol_easytier',
+ default=True,
+ help='Enable Easy Tier for volumes'),
+ cfg.IntOpt('storwize_svc_vol_iogrp',
+ default=0,
+ help='The I/O group in which to allocate volumes'),
+ cfg.IntOpt('storwize_svc_flashcopy_timeout',
+ default=120,
+ min=1, max=600,
+ help='Maximum number of seconds to wait for FlashCopy to be '
+ 'prepared.'),
+ cfg.BoolOpt('storwize_svc_multihostmap_enabled',
+ default=True,
+                help='This option no longer has any effect. It is deprecated '
+ 'and will be removed in the next release.',
+ deprecated_for_removal=True),
+ cfg.BoolOpt('storwize_svc_allow_tenant_qos',
+ default=False,
+ help='Allow tenants to specify QOS on create'),
+ cfg.StrOpt('storwize_svc_stretched_cluster_partner',
+ default=None,
+ help='If operating in stretched cluster mode, specify the '
+                    'name of the pool in which mirrored copies are stored. '
+ 'Example: "pool2"'),
+ cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
+ default=False,
+ help='Specifies that the volume not be formatted during '
+ 'creation.'),
+ cfg.IntOpt('storwize_svc_flashcopy_rate',
+ default=50,
+ min=1, max=100,
+ help='Specifies the Storwize FlashCopy copy rate to be used '
+                     'when creating a full volume copy. The default rate '
+                     'is 50, and the valid rates are 1-100.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(storwize_svc_opts)
+
class StorwizeSSH(object):
"""SSH interface to IBM Storwize family and SVC storage systems."""
return self.ssh.lshostvdiskmap(host_name)
@staticmethod
- def build_default_opts(config):
+ def build_default_opts(config, protocol):
# Ignore capitalization
- protocol = config.storwize_svc_connection_protocol
- if protocol.lower() == 'fc':
- protocol = 'FC'
- elif protocol.lower() == 'iscsi':
- protocol = 'iSCSI'
cluster_partner = config.storwize_svc_stretched_cluster_partner
opt = {'rsize': config.storwize_svc_vol_rsize,
# Check that the requested protocol is enabled
if opts['protocol'] not in state['enabled_protocols']:
raise exception.InvalidInput(
- reason=_('Illegal value %(prot)s specified for '
- 'storwize_svc_connection_protocol: '
- 'valid values are %(enabled)s.')
- % {'prot': opts['protocol'],
- 'enabled': ','.join(state['enabled_protocols'])})
+ reason=_('The storage device does not support %(prot)s. '
+ 'Please configure the device to support %(prot)s or '
+ 'switch to a driver using a different protocol.')
+ % {'prot': opts['protocol']})
if opts['iogrp'] not in state['available_iogrps']:
avail_grps = ''.join(str(e) for e in state['available_iogrps'])
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=interval).wait()
- def get_vdisk_params(self, config, state, type_id, volume_type=None,
- volume_metadata=None):
+    def get_vdisk_params(self, config, state, type_id, protocol='iSCSI',
+ volume_type=None, volume_metadata=None):
"""Return the parameters for creating the vdisk.
Takes volume type and defaults from config options into account.
"""
- opts = self.build_default_opts(config)
+ opts = self.build_default_opts(config, protocol)
ctxt = context.get_admin_context()
if volume_type is None and type_id is not None:
volume_type = volume_types.get_volume_type(ctxt, type_id)
else:
dict_[key] = [obj, value]
return dict_
+
+
+class StorwizeSVCCommonDriver(san.SanDriver,
+ driver.ManageableVD,
+ driver.ExtendVD, driver.SnapshotVD,
+ driver.MigrateVD, driver.ReplicaVD,
+ driver.ConsistencyGroupVD,
+ driver.CloneableImageVD,
+ driver.TransferVD):
+ """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.
+
+ Version history:
+ 1.0 - Initial driver
+ 1.1 - FC support, create_cloned_volume, volume type support,
+ get_volume_stats, minor bug fixes
+ 1.2.0 - Added retype
+ 1.2.1 - Code refactor, improved exception handling
+ 1.2.2 - Fix bug #1274123 (races in host-related functions)
+ 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
+ lsfabric, clear unused data from connections, ensure matching
+              WWPNs by comparing lower case)
+ 1.2.4 - Fix bug #1278035 (async migration/retype)
+ 1.2.5 - Added support for manage_existing (unmanage is inherited)
+ 1.2.6 - Added QoS support in terms of I/O throttling rate
+ 1.3.1 - Added support for volume replication
+ 1.3.2 - Added support for consistency group
+ 1.3.3 - Update driver to use ABC metaclasses
+ 2.0 - Code refactor, split init file and placed shared methods for
+ FC and iSCSI within the StorwizeSVCCommonDriver class
+ """
+
+ VERSION = "2.0"
+ VDISKCOPYOPS_INTERVAL = 600
+
+ def __init__(self, *args, **kwargs):
+ super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(storwize_svc_opts)
+ self._helpers = StorwizeHelpers(self._run_ssh)
+ self._vdiskcopyops = {}
+ self._vdiskcopyops_loop = None
+ self.protocol = ''
+ self.replication = None
+ self._state = {'storage_nodes': {},
+ 'enabled_protocols': set(),
+ 'compression_enabled': False,
+ 'available_iogrps': [],
+ 'system_name': None,
+ 'system_id': None,
+ 'code_level': None,
+ }
+ # Storwize has the limitation that can not burst more than 3 new ssh
+ # connections within 1 second. So slow down the initialization.
+ time.sleep(1)
+
+ def do_setup(self, ctxt):
+ """Check that we have all configuration details from the storage."""
+ LOG.debug('enter: do_setup')
+
+ # Get storage system name, id, and code level
+ self._state.update(self._helpers.get_system_info())
+
+ # Get the replication helpers
+ self.replication = storwize_rep.StorwizeSVCReplication.factory(self)
+
+ # Validate that the pool exists
+ pool = self.configuration.storwize_svc_volpool_name
+ try:
+ self._helpers.get_pool_attrs(pool)
+ except exception.VolumeBackendAPIException:
+ msg = _('Failed getting details for pool %s.') % pool
+ raise exception.InvalidInput(reason=msg)
+
+ # Check if compression is supported
+ self._state['compression_enabled'] = (self._helpers.
+ compression_enabled())
+
+ # Get the available I/O groups
+ self._state['available_iogrps'] = (self._helpers.
+ get_available_io_groups())
+
+ # Get the iSCSI and FC names of the Storwize/SVC nodes
+ self._state['storage_nodes'] = self._helpers.get_node_info()
+
+ # Build the list of in-progress vdisk copy operations
+ if ctxt is None:
+ admin_context = context.get_admin_context()
+ else:
+ admin_context = ctxt.elevated()
+ volumes = self.db.volume_get_all_by_host(admin_context, self.host)
+
+ for volume in volumes:
+ metadata = self.db.volume_admin_metadata_get(admin_context,
+ volume['id'])
+ curr_ops = metadata.get('vdiskcopyops', None)
+ if curr_ops:
+ ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
+ self._vdiskcopyops[volume['id']] = ops
+
+ # if vdiskcopy exists in database, start the looping call
+ if len(self._vdiskcopyops) >= 1:
+ self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
+ self._check_volume_copy_ops)
+ self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
+
+ def check_for_setup_error(self):
+ """Ensure that the flags are set properly."""
+ LOG.debug('enter: check_for_setup_error')
+
+ # Check that we have the system ID information
+ if self._state['system_name'] is None:
+ exception_msg = (_('Unable to determine system name.'))
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+ if self._state['system_id'] is None:
+ exception_msg = (_('Unable to determine system id.'))
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ required_flags = ['san_ip', 'san_ssh_port', 'san_login',
+ 'storwize_svc_volpool_name']
+ for flag in required_flags:
+ if not self.configuration.safe_get(flag):
+ raise exception.InvalidInput(reason=_('%s is not set.') % flag)
+
+ # Ensure that either password or keyfile were set
+ if not (self.configuration.san_password or
+ self.configuration.san_private_key):
+ raise exception.InvalidInput(
+ reason=_('Password or SSH private key is required for '
+ 'authentication: set either san_password or '
+ 'san_private_key option.'))
+
+ opts = self._helpers.build_default_opts(self.configuration,
+ self.protocol)
+ self._helpers.check_vdisk_opts(self._state, opts)
+
+ LOG.debug('leave: check_for_setup_error')
+
+ def ensure_export(self, ctxt, volume):
+ """Check that the volume exists on the storage.
+
+ The system does not "export" volumes as a Linux iSCSI target does,
+ and therefore we just check that the volume exists on the storage.
+ """
+ volume_defined = self._helpers.is_vdisk_defined(volume['name'])
+ if not volume_defined:
+ LOG.error(_LE('ensure_export: Volume %s not found on storage.'),
+ volume['name'])
+
+ def create_export(self, ctxt, volume, connector):
+ model_update = None
+ return model_update
+
+ def remove_export(self, ctxt, volume):
+ pass
+
+ def _get_vdisk_params(self, type_id, volume_type=None,
+ volume_metadata=None):
+ return self._helpers.get_vdisk_params(self.configuration,
+ self._state, type_id,
+ self.protocol,
+ volume_type=volume_type,
+ volume_metadata=volume_metadata)
+
+ def create_volume(self, volume):
+ opts = self._get_vdisk_params(volume['volume_type_id'],
+ volume_metadata=
+ volume.get('volume_metadata'))
+ pool = self.configuration.storwize_svc_volpool_name
+ self._helpers.create_vdisk(volume['name'], str(volume['size']),
+ 'gb', pool, opts)
+ if opts['qos']:
+ self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
+
+ model_update = None
+ if opts.get('replication'):
+ ctxt = context.get_admin_context()
+ model_update = self.replication.create_replica(ctxt, volume)
+ return model_update
+
+ def delete_volume(self, volume):
+ self._helpers.delete_vdisk(volume['name'], False)
+
+ if volume['id'] in self._vdiskcopyops:
+ del self._vdiskcopyops[volume['id']]
+
+ if not len(self._vdiskcopyops):
+ self._vdiskcopyops_loop.stop()
+ self._vdiskcopyops_loop = None
+
+ def create_snapshot(self, snapshot):
+ ctxt = context.get_admin_context()
+ try:
+ source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
+ except Exception:
+ msg = (_('create_snapshot: get source volume failed.'))
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+ opts = self._get_vdisk_params(source_vol['volume_type_id'])
+ self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
+ snapshot['volume_id'], self.configuration,
+ opts, False)
+
+ def delete_snapshot(self, snapshot):
+ self._helpers.delete_vdisk(snapshot['name'], False)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ if volume['size'] != snapshot['volume_size']:
+ msg = (_('create_volume_from_snapshot: Source and destination '
+ 'size differ.'))
+ LOG.error(msg)
+ raise exception.InvalidInput(message=msg)
+
+ opts = self._get_vdisk_params(volume['volume_type_id'],
+ volume_metadata=
+ volume.get('volume_metadata'))
+ self._helpers.create_copy(snapshot['name'], volume['name'],
+ snapshot['id'], self.configuration,
+ opts, True)
+ if opts['qos']:
+ self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
+
+ if 'replication' in opts and opts['replication']:
+ ctxt = context.get_admin_context()
+ replica_status = self.replication.create_replica(ctxt, volume)
+ if replica_status:
+ return replica_status
+
+ def create_cloned_volume(self, tgt_volume, src_volume):
+ if src_volume['size'] != tgt_volume['size']:
+ msg = (_('create_cloned_volume: Source and destination '
+ 'size differ.'))
+ LOG.error(msg)
+ raise exception.InvalidInput(message=msg)
+
+ opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
+ volume_metadata=
+ tgt_volume.get('volume_metadata'))
+ self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
+ src_volume['id'], self.configuration,
+ opts, True)
+ if opts['qos']:
+ self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
+
+ if 'replication' in opts and opts['replication']:
+ ctxt = context.get_admin_context()
+ replica_status = self.replication.create_replica(ctxt, tgt_volume)
+ if replica_status:
+ return replica_status
+
+ def extend_volume(self, volume, new_size):
+ LOG.debug('enter: extend_volume: volume %s', volume['id'])
+ ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
+ allow_snaps=False)
+ if not ret:
+ msg = (_('extend_volume: Extending a volume with snapshots is not '
+ 'supported.'))
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+
+ extend_amt = int(new_size) - volume['size']
+ self._helpers.extend_vdisk(volume['name'], extend_amt)
+ LOG.debug('leave: extend_volume: volume %s', volume['id'])
+
+ def add_vdisk_copy(self, volume, dest_pool, vol_type):
+ return self._helpers.add_vdisk_copy(volume, dest_pool,
+ vol_type, self._state,
+ self.configuration)
+
+ def _add_vdisk_copy_op(self, ctxt, volume, new_op):
+ metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
+ volume['id'])
+ curr_ops = metadata.get('vdiskcopyops', None)
+ if curr_ops:
+ curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
+ new_ops_list = curr_ops_list.append(new_op)
+ else:
+ new_ops_list = [new_op]
+ new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
+ self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
+ {'vdiskcopyops': new_ops_str},
+ False)
+ if volume['id'] in self._vdiskcopyops:
+ self._vdiskcopyops[volume['id']].append(new_op)
+ else:
+ self._vdiskcopyops[volume['id']] = [new_op]
+
+ # We added the first copy operation, so start the looping call
+ if len(self._vdiskcopyops) == 1:
+ self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
+ self._check_volume_copy_ops)
+ self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
+
    def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
        """Forget a completed (or abandoned) vdisk copy operation.

        Removes the (orig_copy_id, new_copy_id) pair both from the
        in-memory map (self._vdiskcopyops) and from the persistent copy
        kept in the volume's admin metadata; stops the periodic checker
        once no operations remain anywhere.
        """
        try:
            self._vdiskcopyops[volume['id']].remove((orig_copy_id,
                                                     new_copy_id))
            if not len(self._vdiskcopyops[volume['id']]):
                del self._vdiskcopyops[volume['id']]
            if not len(self._vdiskcopyops):
                # Nothing left to poll anywhere; shut the looping call down.
                self._vdiskcopyops_loop.stop()
                self._vdiskcopyops_loop = None
        except KeyError:
            # Volume has no registered operations at all.
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
                          'registered vdisk copy operations.'), volume['id'])
            return
        except ValueError:
            # Volume is registered but this particular pair is not.
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
                          'the specified vdisk copy operation: orig=%(orig)s '
                          'new=%(new)s.'),
                      {'vol': volume['id'], 'orig': orig_copy_id,
                       'new': new_copy_id})
            return

        # Mirror the removal in the admin metadata; 'vdiskcopyops' is a
        # ';'-separated list of 'orig:new' copy-id pairs.
        metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
                                                     volume['id'])
        curr_ops = metadata.get('vdiskcopyops', None)
        if not curr_ops:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
                          'have any registered vdisk copy operations.'),
                      volume['id'])
            return
        curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
        try:
            curr_ops_list.remove((orig_copy_id, new_copy_id))
        except ValueError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
                          'not have the specified vdisk copy operation: '
                          'orig=%(orig)s new=%(new)s.'),
                      {'vol': volume['id'], 'orig': orig_copy_id,
                       'new': new_copy_id})
            return

        if len(curr_ops_list):
            new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
            self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
                                                 {'vdiskcopyops': new_ops_str},
                                                 False)
        else:
            # Last operation for this volume: drop the metadata key entirely.
            self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
                                                 'vdiskcopyops')
+
+ def promote_replica(self, ctxt, volume):
+ return self.replication.promote_replica(volume)
+
+ def reenable_replication(self, ctxt, volume):
+ return self.replication.reenable_replication(volume)
+
+ def create_replica_test_volume(self, tgt_volume, src_volume):
+ if src_volume['size'] != tgt_volume['size']:
+ msg = (_('create_cloned_volume: Source and destination '
+ 'size differ.'))
+ LOG.error(msg)
+ raise exception.InvalidInput(message=msg)
+ replica_status = self.replication.test_replica(tgt_volume,
+ src_volume)
+ return replica_status
+
+ def get_replication_status(self, ctxt, volume):
+ replica_status = None
+ if self.replication:
+ replica_status = self.replication.get_replication_status(volume)
+ return replica_status
+
    def _check_volume_copy_ops(self):
        """Periodic task: poll outstanding vdisk copy operations.

        For every registered (orig, new) copy pair, check whether the
        new copy has finished synchronizing; once it is in sync, delete
        the original copy on the backend and unregister the operation.
        """
        LOG.debug("Enter: update volume copy status.")
        ctxt = context.get_admin_context()
        # Iterate over a snapshot of the dict: entries may be removed
        # (here or via _rm_vdisk_copy_op) while we loop.
        copy_items = list(self._vdiskcopyops.items())
        for vol_id, copy_ops in copy_items:
            try:
                volume = self.db.volume_get(ctxt, vol_id)
            except Exception:
                # Volume vanished from the DB: drop its operations, and
                # stop the periodic task when nothing is left to track.
                LOG.warning(_LW('Volume %s does not exist.'), vol_id)
                del self._vdiskcopyops[vol_id]
                if not len(self._vdiskcopyops):
                    self._vdiskcopyops_loop.stop()
                    self._vdiskcopyops_loop = None
                continue

            for copy_op in copy_ops:
                try:
                    synced = self._helpers.is_vdisk_copy_synced(volume['name'],
                                                                copy_op[1])
                except Exception:
                    # Copy is gone on the backend; leave bookkeeping alone
                    # and just report it.
                    LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
                                 'not have the specified vdisk copy '
                                 'operation: orig=%(orig)s new=%(new)s.'),
                             {'vol': volume['id'], 'orig': copy_op[0],
                              'new': copy_op[1]})
                else:
                    if synced:
                        # New copy is in sync: remove the original copy and
                        # forget the operation.
                        self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
                        self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
                                               copy_op[1])
        LOG.debug("Exit: update volume copy status.")
+
    def migrate_volume(self, ctxt, volume, host):
        """Migrate directly if source and dest are managed by same storage.

        We create a new vdisk copy in the desired pool, and add the original
        vdisk copy to the admin_metadata of the volume to be deleted. The
        deletion will occur using a periodic task once the new copy is synced.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        :returns: (True, None) when the copy was started, (False, None)
                  when the destination is not on this storage system.
        """
        LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host['host']})

        false_ret = (False, None)
        dest_pool = self._helpers.can_migrate_to_host(host, self._state)
        if dest_pool is None:
            # Different system (or incompatible pool): let the generic
            # host-assisted migration handle it.
            return false_ret

        # NOTE: the caller-supplied context is deliberately replaced with
        # an admin context for the DB/metadata operations below.
        ctxt = context.get_admin_context()
        volume_type_id = volume['volume_type_id']
        if volume_type_id is not None:
            vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
        else:
            vol_type = None

        # Reap any already-finished copy operations before adding a new one.
        self._check_volume_copy_ops()
        new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
        self._add_vdisk_copy_op(ctxt, volume, new_op)
        LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host['host']})
        return (True, None)
+
+ def retype(self, ctxt, volume, new_type, diff, host):
+ """Convert the volume to be of the new type.
+
+ Returns a boolean indicating whether the retype occurred.
+
+ :param ctxt: Context
+ :param volume: A dictionary describing the volume to migrate
+ :param new_type: A dictionary describing the volume type to convert to
+ :param diff: A dictionary with the difference between the two types
+ :param host: A dictionary describing the host to migrate to, where
+ host['host'] is its name, and host['capabilities'] is a
+ dictionary of its reported capabilities.
+ """
+ def retype_iogrp_property(volume, new, old):
+ if new != old:
+ self._helpers.change_vdisk_iogrp(volume['name'],
+ self._state, (new, old))
+
+ LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
+ 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
+ 'new_type': new_type,
+ 'diff': diff,
+ 'host': host})
+
+ ignore_keys = ['protocol']
+ no_copy_keys = ['warning', 'autoexpand', 'easytier']
+ copy_keys = ['rsize', 'grainsize', 'compression']
+ all_keys = ignore_keys + no_copy_keys + copy_keys
+ old_opts = self._get_vdisk_params(volume['volume_type_id'],
+ volume_metadata=
+ volume.get('volume_matadata'))
+ new_opts = self._get_vdisk_params(new_type['id'],
+ volume_type=new_type)
+
+ # Check if retype affects volume replication
+ model_update = None
+ old_type_replication = old_opts.get('replication', False)
+ new_type_replication = new_opts.get('replication', False)
+
+ # Delete replica if needed
+ if old_type_replication and not new_type_replication:
+ self.replication.delete_replica(volume)
+ model_update = {'replication_status': 'disabled',
+ 'replication_driver_data': None,
+ 'replication_extended_status': None}
+
+ vdisk_changes = []
+ need_copy = False
+ for key in all_keys:
+ if old_opts[key] != new_opts[key]:
+ if key in copy_keys:
+ need_copy = True
+ break
+ elif key in no_copy_keys:
+ vdisk_changes.append(key)
+
+ dest_location = host['capabilities'].get('location_info')
+ if self._stats['location_info'] != dest_location:
+ need_copy = True
+
+ if need_copy:
+ self._check_volume_copy_ops()
+ dest_pool = self._helpers.can_migrate_to_host(host, self._state)
+ if dest_pool is None:
+ return False
+
+ # If volume is replicated, can't copy
+ if new_type_replication:
+ msg = (_('Unable to retype: Current action needs volume-copy,'
+ ' it is not allowed when new type is replication.'
+ ' Volume = %s'), volume['id'])
+ raise exception.VolumeDriverException(message=msg)
+
+ retype_iogrp_property(volume,
+ new_opts['iogrp'],
+ old_opts['iogrp'])
+ try:
+ new_op = self.add_vdisk_copy(volume['name'],
+ dest_pool,
+ new_type)
+ self._add_vdisk_copy_op(ctxt, volume, new_op)
+ except exception.VolumeDriverException:
+ # roll back changing iogrp property
+ retype_iogrp_property(volume, old_opts['iogrp'],
+ new_opts['iogrp'])
+ msg = (_('Unable to retype: A copy of volume %s exists. '
+ 'Retyping would exceed the limit of 2 copies.'),
+ volume['id'])
+ raise exception.VolumeDriverException(message=msg)
+ else:
+ retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
+
+ self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
+ new_opts, self._state)
+
+ if new_opts['qos']:
+ # Add the new QoS setting to the volume. If the volume has an
+ # old QoS setting, it will be overwritten.
+ self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
+ elif old_opts['qos']:
+ # If the old_opts contain QoS keys, disable them.
+ self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
+
+ # Add replica if needed
+ if not old_type_replication and new_type_replication:
+ model_update = self.replication.create_replica(ctxt, volume,
+ new_type)
+
+ LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
+ 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
+ 'new_type': new_type,
+ 'diff': diff,
+ 'host': host['host']})
+ return True, model_update
+
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
+ """Return model update from Storwize for migrated volume.
+
+ This method should rename the back-end volume name(id) on the
+ destination host back to its original name(id) on the source host.
+
+ :param ctxt: The context used to run the method update_migrated_volume
+ :param volume: The original volume that was migrated to this backend
+ :param new_volume: The migration volume object that was created on
+ this backend as part of the migration process
+ :param original_volume_status: The status of the original volume
+ :returns: model_update to update DB with any needed changes
+ """
+ current_name = CONF.volume_name_template % new_volume['id']
+ original_volume_name = CONF.volume_name_template % volume['id']
+ try:
+ self._helpers.rename_vdisk(current_name, original_volume_name)
+ except exception.VolumeBackendAPIException:
+ LOG.error(_LE('Unable to rename the logical volume '
+ 'for volume: %s'), volume['id'])
+ return {'_name_id': new_volume['_name_id'] or new_volume['id']}
+ # If the back-end name(id) for the volume has been renamed,
+ # it is OK for the volume to keep the original name(id) and there is
+ # no need to use the column "_name_id" to establish the mapping
+ # relationship between the volume id and the back-end volume
+ # name(id).
+ # Set the key "_name_id" to None for a successful rename.
+ model_update = {'_name_id': None}
+ return model_update
+
+ def manage_existing(self, volume, ref):
+ """Manages an existing vdisk.
+
+ Renames the vdisk to match the expected name for the volume.
+ Error checking done by manage_existing_get_size is not repeated -
+ if we got here then we have a vdisk that isn't in use (or we don't
+ care if it is in use.
+ """
+ vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
+ if vdisk is None:
+ reason = (_('No vdisk with the UID specified by source-id %s.')
+ % ref['source-id'])
+ raise exception.ManageExistingInvalidReference(existing_ref=ref,
+ reason=reason)
+ self._helpers.rename_vdisk(vdisk['name'], volume['name'])
+
+ def manage_existing_get_size(self, volume, ref):
+ """Return size of an existing Vdisk for manage_existing.
+
+ existing_ref is a dictionary of the form:
+ {'source-id': <uid of disk>}
+
+ Optional elements are:
+ 'manage_if_in_use': True/False (default is False)
+ If set to True, a volume will be managed even if it is currently
+ attached to a host system.
+ """
+
+ # Check that the reference is valid
+ if 'source-id' not in ref:
+ reason = _('Reference must contain source-id element.')
+ raise exception.ManageExistingInvalidReference(existing_ref=ref,
+ reason=reason)
+
+ # Check for existence of the vdisk
+ vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
+ if vdisk is None:
+ reason = (_('No vdisk with the UID specified by source-id %s.')
+ % (ref['source-id']))
+ raise exception.ManageExistingInvalidReference(existing_ref=ref,
+ reason=reason)
+
+ # Check if the disk is in use, if we need to.
+ manage_if_in_use = ref.get('manage_if_in_use', False)
+ if (not manage_if_in_use and
+ self._helpers.is_vdisk_in_use(vdisk['name'])):
+ reason = _('The specified vdisk is mapped to a host.')
+ raise exception.ManageExistingInvalidReference(existing_ref=ref,
+ reason=reason)
+
+ return int(math.ceil(float(vdisk['capacity']) / units.Gi))
+
+ def unmanage(self, volume):
+ """Remove the specified volume from Cinder management."""
+ pass
+
+ def get_volume_stats(self, refresh=False):
+ """Get volume stats.
+
+ If we haven't gotten stats yet or 'refresh' is True,
+ run update the stats first.
+ """
+ if not self._stats or refresh:
+ self._update_volume_stats()
+
+ return self._stats
+
+ def create_consistencygroup(self, context, group):
+ """Create a consistency group.
+
+ IBM Storwize will create CG until cg-snapshot creation,
+ db will maintain the volumes and CG relationship.
+ """
+ LOG.debug("Creating consistency group.")
+ model_update = {'status': 'available'}
+ return model_update
+
+ def delete_consistencygroup(self, context, group, volumes):
+ """Deletes a consistency group.
+
+ IBM Storwize will delete the volumes of the CG.
+ """
+ LOG.debug("Deleting consistency group.")
+ model_update = {}
+ model_update['status'] = 'deleted'
+ volumes = self.db.volume_get_all_by_group(context, group['id'])
+
+ for volume in volumes:
+ try:
+ self._helpers.delete_vdisk(volume['name'], True)
+ volume['status'] = 'deleted'
+ except exception.VolumeBackendAPIException as err:
+ volume['status'] = 'error_deleting'
+ if model_update['status'] != 'error_deleting':
+ model_update['status'] = 'error_deleting'
+ LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
+ "Exception: %(exception)s."),
+ {'vol': volume['name'], 'exception': err})
+ return model_update, volumes
+
+ def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
+ """Creates a cgsnapshot."""
+ # Use cgsnapshot id as cg name
+ cg_name = 'cg_snap-' + cgsnapshot['id']
+ # Create new cg as cg_snapshot
+ self._helpers.create_fc_consistgrp(cg_name)
+
+ snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+ ctxt, cgsnapshot['id'])
+ timeout = self.configuration.storwize_svc_flashcopy_timeout
+
+ model_update, snapshots_model = (
+ self._helpers.run_consistgrp_snapshots(cg_name,
+ snapshots,
+ self._state,
+ self.configuration,
+ timeout))
+
+ return model_update, snapshots_model
+
+ def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
+ """Deletes a cgsnapshot."""
+ cgsnapshot_id = cgsnapshot['id']
+ cg_name = 'cg_snap-' + cgsnapshot_id
+
+ snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
+ cgsnapshot_id)
+
+ model_update, snapshots_model = (
+ self._helpers.delete_consistgrp_snapshots(cg_name,
+ snapshots))
+
+ return model_update, snapshots_model
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from volume group."""
+
+ LOG.debug("Updating volume stats.")
+ data = {}
+
+ data['vendor_name'] = 'IBM'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = self.protocol
+
+ data['total_capacity_gb'] = 0 # To be overwritten
+ data['free_capacity_gb'] = 0 # To be overwritten
+ data['reserved_percentage'] = self.configuration.reserved_percentage
+ data['multiattach'] = (self.configuration.
+ storwize_svc_multihostmap_enabled)
+ data['QoS_support'] = True
+ data['consistencygroup_support'] = True
+
+ pool = self.configuration.storwize_svc_volpool_name
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ if not backend_name:
+ backend_name = '%s_%s' % (self._state['system_name'], pool)
+ data['volume_backend_name'] = backend_name
+
+ attributes = self._helpers.get_pool_attrs(pool)
+ if not attributes:
+ LOG.error(_LE('Could not get pool data from the storage.'))
+ exception_message = (_('_update_volume_stats: '
+ 'Could not get storage pool data.'))
+ raise exception.VolumeBackendAPIException(data=exception_message)
+
+ data['total_capacity_gb'] = (float(attributes['capacity']) /
+ units.Gi)
+ data['free_capacity_gb'] = (float(attributes['free_capacity']) /
+ units.Gi)
+ data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
+ data['compression_support'] = self._state['compression_enabled']
+ data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
+ {'sys_id': self._state['system_id'],
+ 'pool': pool})
+
+ if self.replication:
+ data.update(self.replication.get_replication_info())
+
+ self._stats = data
--- /dev/null
+# Copyright 2015 IBM Corp.
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""
+Volume FC driver for IBM Storwize family and SVC storage systems.
+
+Notes:
+1. If you specify both a password and a key file, this driver will use the
+ key file only.
+2. When using a key file for authentication, it is up to the user or
+ system administrator to store the private key in a safe manner.
+3. The defaults for creating volumes are "-rsize 2% -autoexpand
+ -grainsize 256 -warning 0". These can be changed in the configuration
+ file or by using volume types(recommended only for advanced users).
+
+Limitations:
+1. The driver expects CLI output in English, error messages may be in a
+ localized format.
+2. Clones and creating volumes from snapshots, where the source and target
+ are of different sizes, is not supported.
+
+"""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder import utils
+from cinder.volume.drivers.ibm.storwize_svc import (
+ storwize_svc_common as storwize_common)
+from cinder.zonemanager import utils as fczm_utils
+
+LOG = logging.getLogger(__name__)
+
# FC-specific configuration options for the Storwize/SVC driver.
storwize_svc_fc_opts = [
    cfg.BoolOpt('storwize_svc_multipath_enabled',
                default=False,
                help='Connect with multipath (FC only; iSCSI multipath is '
                     'controlled by Nova)'),
]

CONF = cfg.CONF
# Register globally so oslo.config can list/validate the options.
CONF.register_opts(storwize_svc_fc_opts)
+
+
class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver):
    """IBM Storwize V7000 and SVC FC volume driver.

    Version history:
    1.0 - Initial driver
    1.1 - FC support, create_cloned_volume, volume type support,
          get_volume_stats, minor bug fixes
    1.2.0 - Added retype
    1.2.1 - Code refactor, improved exception handling
    1.2.2 - Fix bug #1274123 (races in host-related functions)
    1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
            lsfabric, clear unused data from connections, ensure matching
            WWPNs by comparing lower case
    1.2.4 - Fix bug #1278035 (async migration/retype)
    1.2.5 - Added support for manage_existing (unmanage is inherited)
    1.2.6 - Added QoS support in terms of I/O throttling rate
    1.3.1 - Added support for volume replication
    1.3.2 - Added support for consistency group
    1.3.3 - Update driver to use ABC metaclasses
    2.0 - Code refactor, split init file and placed shared methods for
          FC and iSCSI within the StorwizeSVCCommonDriver class
    """

    VERSION = "2.0"

    def __init__(self, *args, **kwargs):
        super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs)
        # Expose the FC-specific options through this driver's config.
        self.configuration.append_config_values(
            storwize_svc_fc_opts)

    def do_setup(self, ctxt):
        """Validate the flags and set up FC-specific driver state."""
        # Set protocol
        self.protocol = 'FC'

        # Set up the functionality shared between the FC and iSCSI drivers.
        super(StorwizeSVCFCDriver, self).do_setup(ctxt)

        # Add WWPNs to the storage node info
        self._helpers.add_fc_wwpns(self._state['storage_nodes'])

        # For each node, check what connection modes it supports. Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
        for k, node in self._state['storage_nodes'].items():
            # A node is FC-capable when it reports at least one WWPN.
            if len(node['WWPN']):
                node['enabled_protocols'].append('FC')
                self._state['enabled_protocols'].add('FC')
            if not len(node['enabled_protocols']):
                LOG.info(_LI("%(node)s will be removed since "
                             "it is not supported by the"
                             " FC driver."), {'node': node['name']})
                to_delete.append(k)
        for delkey in to_delete:
            del self._state['storage_nodes'][delkey]

        # Make sure we have at least one node configured
        if not len(self._state['storage_nodes']):
            msg = _('do_setup: No configured nodes.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        LOG.debug('leave: do_setup')

    def validate_connector(self, connector):
        """Check connector for at least one enabled FC protocol."""
        if 'wwpns' not in connector:
            LOG.error(_LE('The connector does not contain the required '
                          'information.'))
            raise exception.InvalidConnectorException(
                missing='wwpns')

    @fczm_utils.AddFCZone
    @utils.synchronized('storwize-host', external=True)
    def initialize_connection(self, volume, connector):
        """Perform necessary work to make a FC connection.

        To be able to create an FC connection from a given host to a
        volume, we must:
        1. Translate the given WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)

        """
        LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})

        volume_name = volume['name']

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(_LE('Did not find expected column name in '
                          'lsvdisk: %s.'), e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') %
                       {'gid': IO_group, 'vol': volume_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(_LW('initialize_connection: Did not find a '
                                'preferred node for volume %s.'), volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']

            # Target WWPNs that already have visibility to this host.
            conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)

            # If conn_wwpns is empty, then that means that there were
            # no target ports with visibility to any of the initiators
            # so we return all target ports.
            if len(conn_wwpns) == 0:
                for node in self._state['storage_nodes'].values():
                    conn_wwpns.extend(node['WWPN'])

            properties['target_wwn'] = conn_wwpns

            i_t_map = self._make_initiator_target_map(connector['wwpns'],
                                                      conn_wwpns)
            properties['initiator_target_map'] = i_t_map

            # specific for z/VM, refer to cinder bug 1323993
            if "zvm_fcp" in connector:
                properties['zvm_fcp'] = connector['zvm_fcp']
        except Exception:
            # Undo the mapping before re-raising so the backend is not
            # left with a half-initialized connection.
            with excutils.save_and_reraise_exception():
                self.terminate_connection(volume, connector)
                LOG.error(_LE('initialize_connection: Failed '
                              'to collect return '
                              'properties for volume %(vol)s and connector '
                              '%(conn)s.\n'), {'vol': volume,
                                               'conn': connector})

        LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
                  'connector %(conn)s\n properties: %(prop)s',
                  {'vol': volume['id'], 'conn': connector,
                   'prop': properties})

        return {'driver_volume_type': 'fibre_channel', 'data': properties, }

    def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
        """Build a simplistic all-to-all mapping."""
        i_t_map = {}
        for i_wwpn in initiator_wwpns:
            i_t_map[str(i_wwpn)] = []
            for t_wwpn in target_wwpns:
                i_t_map[i_wwpn].append(t_wwpn)

        return i_t_map

    @fczm_utils.RemoveFCZone
    @utils.synchronized('storwize-host', external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Cleanup after an FC connection has been terminated.

        When we clean up a terminated connection between a given connector
        and volume, we:
        1. Translate the given connector to a host name
        2. Remove the volume-to-host mapping if it exists
        3. Delete the host if it has no more mappings (hosts are created
           automatically by this driver when mappings are created)
        """
        LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})
        vol_name = volume['name']
        info = {}
        if 'host' in connector:
            # get host according to FC protocol
            connector = connector.copy()

            # Drop the iSCSI initiator so host lookup matches on FC only.
            connector.pop('initiator', None)
            info = {'driver_volume_type': 'fibre_channel',
                    'data': {}}

            host_name = self._helpers.get_host_from_connector(connector)
            if host_name is None:
                msg = (_('terminate_connection: Failed to get host name from'
                         ' connector.'))
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
        else:
            # See bug #1244257
            host_name = None

        # Unmap volumes, if hostname is None, need to get value from vdiskmap
        host_name = self._helpers.unmap_vol_from_host(vol_name, host_name)

        # Host_name could be none
        if host_name:
            resp = self._helpers.check_host_mapped_vols(host_name)
            if not len(resp):
                LOG.info(_LI("Need to remove FC Zone, building initiator "
                             "target map."))
                # Build info data structure for zone removing
                if 'wwpns' in connector and host_name:
                    target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
                    init_targ_map = (self._make_initiator_target_map
                                     (connector['wwpns'],
                                      target_wwpns))
                    info['data'] = {'initiator_target_map': init_targ_map}
                # No volume mapped to the host, delete host from array
                self._helpers.delete_host(host_name)

        LOG.debug('leave: terminate_connection: volume %(vol)s with '
                  'connector %(conn)s', {'vol': volume['id'],
                                         'conn': connector})
        return info
--- /dev/null
+# Copyright 2015 IBM Corp.
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""
+ISCSI volume driver for IBM Storwize family and SVC storage systems.
+
+Notes:
+1. If you specify both a password and a key file, this driver will use the
+ key file only.
+2. When using a key file for authentication, it is up to the user or
+ system administrator to store the private key in a safe manner.
+3. The defaults for creating volumes are "-rsize 2% -autoexpand
+ -grainsize 256 -warning 0". These can be changed in the configuration
+ file or by using volume types(recommended only for advanced users).
+
+Limitations:
+1. The driver expects CLI output in English, error messages may be in a
+ localized format.
+2. Clones and creating volumes from snapshots, where the source and target
+ are of different sizes, is not supported.
+
+"""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder import utils
+
+from cinder.volume.drivers.ibm.storwize_svc import (
+ storwize_svc_common as storwize_common)
+
+LOG = logging.getLogger(__name__)
+
# iSCSI-specific configuration options for the Storwize/SVC driver.
storwize_svc_iscsi_opts = [
    cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
                default=True,
                help='Configure CHAP authentication for iSCSI connections '
                     '(Default: Enabled)'),
]

CONF = cfg.CONF
# Register globally so oslo.config can list/validate the options.
CONF.register_opts(storwize_svc_iscsi_opts)
+
+
+class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver):
+ """IBM Storwize V7000 and SVC iSCSI volume driver.
+
+ Version history:
+ 1.0 - Initial driver
+ 1.1 - FC support, create_cloned_volume, volume type support,
+ get_volume_stats, minor bug fixes
+ 1.2.0 - Added retype
+ 1.2.1 - Code refactor, improved exception handling
+ 1.2.2 - Fix bug #1274123 (races in host-related functions)
+ 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
+ lsfabric, clear unused data from connections, ensure matching
+ WWPNs by comparing lower case
+ 1.2.4 - Fix bug #1278035 (async migration/retype)
+ 1.2.5 - Added support for manage_existing (unmanage is inherited)
+ 1.2.6 - Added QoS support in terms of I/O throttling rate
+ 1.3.1 - Added support for volume replication
+ 1.3.2 - Added support for consistency group
+ 1.3.3 - Update driver to use ABC metaclasses
+ 2.0 - Code refactor, split init file and placed shared methods for
+ FC and iSCSI within the StorwizeSVCCommonDriver class
+ """
+
+ VERSION = "2.0"
+
    def __init__(self, *args, **kwargs):
        super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs)
        # Expose the iSCSI-specific options through this driver's config.
        self.configuration.append_config_values(
            storwize_svc_iscsi_opts)
+
    def do_setup(self, ctxt):
        """Validate the flags and set up iSCSI-specific driver state."""
        # Set protocol
        self.protocol = 'iSCSI'

        # Setup common functionality between FC and iSCSI
        super(StorwizeSVCISCSIDriver, self).do_setup(ctxt)

        # Get the iSCSI names of the Storwize/SVC nodes
        self._state['storage_nodes'] = self._helpers.get_node_info()

        # Add the iSCSI IP addresses to the storage node info
        self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])

        # For each node, check what connection modes it supports. Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
        for k, node in self._state['storage_nodes'].items():
            # A node is iSCSI-capable when it has at least one IP address
            # (v4 or v6) and an IQN.
            if ((len(node['ipv4']) or len(node['ipv6']))
                    and len(node['iscsi_name'])):
                node['enabled_protocols'].append('iSCSI')
                self._state['enabled_protocols'].add('iSCSI')
            if not len(node['enabled_protocols']):
                LOG.info(_LI("%(node)s will be removed since "
                             "it is not supported by the "
                             "iSCSI driver."), {'node': node['name']})
                to_delete.append(k)
        for delkey in to_delete:
            del self._state['storage_nodes'][delkey]

        # Make sure we have at least one node configured
        if not len(self._state['storage_nodes']):
            msg = _('do_setup: No configured nodes.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        LOG.debug('leave: do_setup')
+
+ def validate_connector(self, connector):
+ """Check connector for at least one enabled iSCSI protocol."""
+ if 'initiator' not in connector:
+ LOG.error(_LE('The connector does not contain the required '
+ 'information.'))
+ raise exception.InvalidConnectorException(
+ missing='initiator')
+
    @utils.synchronized('storwize-host', external=True)
    def initialize_connection(self, volume, connector):
        """Perform necessary work to make an iSCSI connection.

        To be able to create an iSCSI connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)
        """
        LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})

        vol_opts = self._get_vdisk_params(volume['volume_type_id'])
        volume_name = volume['name']

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        # Reconcile the host's CHAP secret on the array with the driver's
        # CHAP configuration: create one if CHAP is enabled but no secret
        # exists; only warn (do not delete) if a secret lingers while CHAP
        # is disabled.
        chap_secret = self._helpers.get_chap_secret_for_host(host_name)
        chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
        if chap_enabled and chap_secret is None:
            chap_secret = self._helpers.add_chap_secret_to_host(host_name)
        elif not chap_enabled and chap_secret:
            LOG.warning(_LW('CHAP secret exists for host but CHAP is '
                            'disabled.'))

        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            # lsvdisk output did not contain the expected columns; surface
            # this as a backend API error rather than a bare KeyError.
            LOG.error(_LE('Did not find expected column name in '
                          'lsvdisk: %s.'), e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)

        try:
            # Get preferred node and other nodes in I/O group
            # NOTE(review): node dicts come from _state['storage_nodes']
            # populated in do_setup; assumes keys 'id', 'IO_group', 'ipv4',
            # 'ipv6', 'iscsi_name', 'enabled_protocols' — confirm against
            # helpers.get_node_info().
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if vol_opts['protocol'] not in node['enabled_protocols']:
                    continue
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') %
                       {'gid': IO_group, 'vol': volume_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(_LW('initialize_connection: Did not find a '
                                'preferred node for volume %s.'), volume_name)

            # Build the connection properties consumed by the Nova/os-brick
            # iSCSI connector.
            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']

            # Prefer an IPv4 portal; fall back to IPv6. 3260 is the
            # standard iSCSI port.
            if len(preferred_node_entry['ipv4']):
                ipaddr = preferred_node_entry['ipv4'][0]
            else:
                ipaddr = preferred_node_entry['ipv6'][0]
            properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
            properties['target_iqn'] = preferred_node_entry['iscsi_name']
            if chap_secret:
                # One-way CHAP: the initiator IQN doubles as the CHAP
                # username for both session and discovery authentication.
                properties['auth_method'] = 'CHAP'
                properties['auth_username'] = connector['initiator']
                properties['auth_password'] = chap_secret
                properties['discovery_auth_method'] = 'CHAP'
                properties['discovery_auth_username'] = (
                    connector['initiator'])
                properties['discovery_auth_password'] = chap_secret

        except Exception:
            # Roll back the host mapping created above before re-raising so
            # a failed attach does not leave a stale volume-to-host map.
            with excutils.save_and_reraise_exception():
                self.terminate_connection(volume, connector)
                LOG.error(_LE('initialize_connection: Failed '
                              'to collect return '
                              'properties for volume %(vol)s and connector '
                              '%(conn)s.\n'), {'vol': volume,
                                               'conn': connector})

        LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
                  'connector %(conn)s\n properties: %(prop)s',
                  {'vol': volume['id'], 'conn': connector,
                   'prop': properties})

        return {'driver_volume_type': 'iscsi', 'data': properties, }
+
+ @utils.synchronized('storwize-host', external=True)
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Cleanup after an iSCSI connection has been terminated.
+
+ When we clean up a terminated connection between a given connector
+ and volume, we:
+ 1. Translate the given connector to a host name
+ 2. Remove the volume-to-host mapping if it exists
+ 3. Delete the host if it has no more mappings (hosts are created
+ automatically by this driver when mappings are created)
+ """
+ LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
+ ' %(conn)s', {'vol': volume['id'], 'conn': connector})
+ vol_name = volume['name']
+ info = {}
+ if 'host' in connector:
+ # get host according to iSCSI protocol
+ info = {'driver_volume_type': 'iscsi',
+ 'data': {}}
+
+ host_name = self._helpers.get_host_from_connector(connector)
+ if host_name is None:
+ msg = (_('terminate_connection: Failed to get host name from'
+ ' connector.'))
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+ else:
+ # See bug #1244257
+ host_name = None
+
+ # Unmap volumes, if hostname is None, need to get value from vdiskmap
+ host_name = self._helpers.unmap_vol_from_host(vol_name, host_name)
+
+ # Host_name could be none
+ if host_name:
+ resp = self._helpers.check_host_mapped_vols(host_name)
+ if not len(resp):
+ self._helpers.delete_host(host_name)
+
+ LOG.debug('leave: terminate_connection: volume %(vol)s with '
+ 'connector %(conn)s', {'vol': volume['id'],
+ 'conn': connector})
+ return info
--- /dev/null
+---
+upgrade:
  - Split up __init__.py into storwize_svc_iscsi.py, storwize_svc_fc.py, and storwize_svc_common.py.
+ - Moved iSCSI/FC config options to respective files. Moved all other config options to storwize_svc_common.py. Removed storwize_svc_connection_protocol config option.
  - Users will now need to set different values for volume_driver in cinder.conf. FC: volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver iSCSI: volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver