LEFTHAND_MODULE = "cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver"
NFS_MODULE = "cinder.volume.drivers.nfs.NfsDriver"
SOLIDFIRE_MODULE = "cinder.volume.drivers.solidfire.SolidFireDriver"
-STORWIZE_SVC_MODULE = "cinder.volume.drivers.storwize_svc.StorwizeSVCDriver"
+STORWIZE_MODULE = "cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver"
WINDOWS_MODULE = "cinder.volume.drivers.windows.windows.WindowsDriver"
XIV_DS8K_MODULE = "cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver"
ZADARA_MODULE = "cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver"
def test_storwize_svc_old(self):
self._load_driver('cinder.volume.storwize_svc.StorwizeSVCDriver')
- self.assertEqual(self._driver_module_name(), STORWIZE_SVC_MODULE)
+ self.assertEqual(self._driver_module_name(), STORWIZE_MODULE)
+
+ def test_storwize_svc_old2(self):
+ self._load_driver('cinder.volume.drivers.storwize_svc.'
+ 'StorwizeSVCDriver')
+ self.assertEqual(self._driver_module_name(), STORWIZE_MODULE)
def test_storwize_svc_new(self):
- self._load_driver(STORWIZE_SVC_MODULE)
- self.assertEqual(self._driver_module_name(), STORWIZE_SVC_MODULE)
+ self._load_driver(STORWIZE_MODULE)
+ self.assertEqual(self._driver_module_name(), STORWIZE_MODULE)
def test_windows_old(self):
self._load_driver('cinder.volume.windows.WindowsDriver')
Tests for the IBM Storwize family and SVC volume driver.
"""
-
+import mock
import random
import re
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
-from cinder.volume.drivers import storwize_svc
+from cinder.volume.drivers.ibm import storwize_svc
+from cinder.volume.drivers.ibm.storwize_svc import ssh
from cinder.volume import volume_types
-from eventlet import greenthread
-
LOG = logging.getLogger(__name__)
def __init__(self):
self.volume = None
- def volume_get(self, context, vol_id):
+ def volume_get(self, ctxt, vol_id):
return self.volume
def volume_set(self, vol):
return self._errors['CMMVC5903E']
# Find an unused ID
- def _find_unused_id(self, d):
+ @staticmethod
+ def _find_unused_id(d):
ids = []
- for k, v in d.iteritems():
+ for v in d.itervalues():
ids.append(int(v['id']))
ids.sort()
for index, n in enumerate(ids):
return str(len(ids))
# Check if name is valid
- def _is_invalid_name(self, name):
- if re.match("^[a-zA-Z_][\w ._-]*$", name):
+ @staticmethod
+ def _is_invalid_name(name):
+ if re.match(r'^[a-zA-Z_][\w ._-]*$', name):
return False
return True
# Convert argument string to dictionary
- def _cmd_to_dict(self, arg_list):
+ @staticmethod
+ def _cmd_to_dict(arg_list):
no_param_args = [
'autodelete',
'bytes',
ret['obj'] = arg_list[i]
return ret
- def _print_info_cmd(self, rows, delim=' ', nohdr=False, **kwargs):
+ @staticmethod
+ def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs):
"""Generic function for printing information."""
if nohdr:
del rows[0]
rows[index] = delim.join(rows[index])
return ('%s' % '\n'.join(rows), '')
- def _print_info_obj_cmd(self, header, row, delim=' ', nohdr=False):
+ @staticmethod
+ def _print_info_obj_cmd(header, row, delim=' ', nohdr=False):
"""Generic function for printing information for a specific object."""
objrows = []
for idx, val in enumerate(header):
objrows[index] = delim.join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
- def _convert_bytes_units(self, bytestr):
+ @staticmethod
+ def _convert_bytes_units(bytestr):
num = int(bytestr)
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
return '%d%s' % (num, unit_array[unit_index])
- def _convert_units_bytes(self, num, unit):
+ @staticmethod
+ def _convert_units_bytes(num, unit):
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None
host_infos = []
- for hk, hv in self._hosts_list.iteritems():
+ for hv in self._hosts_list.itervalues():
if not host_name or hv['host_name'].startswith(host_name):
- for mk, mv in self._mappings_list.iteritems():
+ for mv in self._mappings_list.itervalues():
if mv['host'] == hv['host_name']:
if not target_wwpn or target_wwpn in hv['wwpns']:
host_infos.append(hv)
return self._errors['CMMVC5753E']
if not force:
- for k, mapping in self._mappings_list.iteritems():
+ for mapping in self._mappings_list.itervalues():
if mapping['vol'] == vol_name:
return self._errors['CMMVC5840E']
- for k, fcmap in self._fcmappings_list.iteritems():
+ for fcmap in self._fcmappings_list.itervalues():
if ((fcmap['source'] == vol_name) or
(fcmap['target'] == vol_name)):
return self._errors['CMMVC5840E']
'fc_name': '',
'fc_map_count': '0',
}
- for k, fcmap in self._fcmappings_list.iteritems():
+ for fcmap in self._fcmappings_list.itervalues():
if ((fcmap['source'] == vol_name) or
(fcmap['target'] == vol_name)):
ret_vals['fc_id'] = fcmap['id']
'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count',
'fast_write_state', 'se_copy_count', 'RC_change'])
- for k, vol in self._volumes_list.iteritems():
+ for vol in self._volumes_list.itervalues():
if (('filtervalue' not in kwargs) or
(kwargs['filtervalue'] == 'name=' + vol['name'])):
fcmap_info = self._get_fcmap_info(vol['name'])
rows.append(['IO_group_id', vol['IO_group_id']])
rows.append(['IO_group_name', vol['IO_group_name']])
rows.append(['status', 'online'])
- rows.append(['mdisk_grp_id', '0'])
- if 'mdisk_grp_name' in vol:
- mdisk_grp_name = vol['mdisk_grp_name']
- else:
- mdisk_grp_name = self._flags['storwize_svc_volpool_name']
- rows.append(['mdisk_grp_name', mdisk_grp_name])
rows.append(['capacity', cap])
- rows.append(['type', 'striped'])
rows.append(['formatted', 'no'])
rows.append(['mdisk_id', ''])
rows.append(['mdisk_name', ''])
rows.append(['se_copy_count', '0'])
rows.append(['mirror_write_priority', 'latency'])
rows.append(['RC_change', 'no'])
- rows.append(['used_capacity', cap_u])
- rows.append(['real_capacity', cap_r])
- rows.append(['free_capacity', cap_f])
- rows.append(['autoexpand', vol['autoexpand']])
- rows.append(['warning', vol['warning']])
- rows.append(['grainsize', vol['grainsize']])
- rows.append(['easy_tier', vol['easy_tier']])
- rows.append(['compressed_copy', vol['compressed_copy']])
+
+ for copy in vol['copies'].itervalues():
+ rows.append(['copy_id', copy['id']])
+ rows.append(['status', copy['status']])
+ rows.append(['primary', copy['primary']])
+ rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
+ rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
+ rows.append(['type', 'striped'])
+ rows.append(['used_capacity', cap_u])
+ rows.append(['real_capacity', cap_r])
+ rows.append(['free_capacity', cap_f])
+ rows.append(['easy_tier', copy['easy_tier']])
+ rows.append(['compressed_copy', copy['compressed_copy']])
+ rows.append(['autoexpand', vol['autoexpand']])
+ rows.append(['warning', vol['warning']])
+ rows.append(['grainsize', vol['grainsize']])
if 'nohdr' in kwargs:
for index in range(len(rows)):
host_info[added_key].append(added_val)
- for k, v in self._hosts_list.iteritems():
+ for v in self._hosts_list.itervalues():
if v['id'] == host_info['id']:
continue
for port in v[added_key]:
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
- for k, v in self._mappings_list.iteritems():
+ for v in self._mappings_list.itervalues():
if (v['host'] == host_name):
return self._errors['CMMVC5871E']
rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
found = False
- for k, host in self._hosts_list.iteritems():
+ for host in self._hosts_list.itervalues():
filterstr = 'name=' + host['host_name']
if (('filtervalue' not in kwargs) or
(kwargs['filtervalue'] == filterstr)):
rows.append(['type', 'id', 'name', 'iscsi_auth_method',
'iscsi_chap_secret'])
- for k, host in self._hosts_list.iteritems():
+ for host in self._hosts_list.itervalues():
method = 'none'
secret = ''
if 'chapsecret' in host:
if mapping_info['vol'] in self._mappings_list:
return self._errors['CMMVC6071E']
- for k, v in self._mappings_list.iteritems():
+ for v in self._mappings_list.itervalues():
if ((v['host'] == mapping_info['host']) and
(v['lun'] == mapping_info['lun'])):
return self._errors['CMMVC5879E']
- for k, v in self._mappings_list.iteritems():
+ for v in self._mappings_list.itervalues():
if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
return self._errors['CMMVC6071E']
vol = kwargs['obj'].strip('\'\'')
mapping_ids = []
- for k, v in self._mappings_list.iteritems():
+ for v in self._mappings_list.itervalues():
if v['vol'] == vol:
mapping_ids.append(v['id'])
if not mapping_ids:
# List information about host->vdisk mappings
def _cmd_lshostvdiskmap(self, **kwargs):
- index = 1
- no_hdr = 0
- delimeter = ''
host_name = kwargs['obj']
if host_name not in self._hosts_list:
rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
'vdisk_UID'])
- for k, mapping in self._mappings_list.iteritems():
+ for mapping in self._mappings_list.itervalues():
if (host_name == '') or (mapping['host'] == host_name):
volume = self._volumes_list[mapping['vol']]
rows.append([mapping['id'], mapping['host'],
rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID',
'IO_group_id', 'IO_group_name'])
- for k, mapping in self._mappings_list.iteritems():
+ for mapping in self._mappings_list.itervalues():
if (mapping['vol'] == vdisk_name):
mappings_found += 1
volume = self._volumes_list[mapping['vol']]
vdisk = kwargs['obj']
rows = []
rows.append(['id', 'name'])
- for k, v in self._fcmappings_list.iteritems():
+ for v in self._fcmappings_list.itervalues():
if v['source'] == vdisk or v['target'] == vdisk:
rows.append([v['id'], v['name']])
return self._print_info_cmd(rows=rows, **kwargs)
'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity',
'type', 'se_copy', 'easy_tier', 'easy_tier_status',
'compressed_copy'])
- for k, copy in vol['copies'].iteritems():
+ for copy in vol['copies'].itervalues():
rows.append([vol['id'], vol['name'], copy['id'],
copy['status'], copy['sync'], copy['primary'],
copy['mdisk_grp_id'], copy['mdisk_grp_name'],
return self._errors['CMMVC6353E']
del vol['copies'][copy_id]
- copy_info = vol['copies'].values()[0]
- for key in copy_info:
- vol[key] = copy_info[key]
- del vol['copies']
return ('', '')
def _cmd_chvdisk(self, **kwargs):
self._hosts_list[connector['host']] = host_info
def _host_in_list(self, host_name):
- for k, v in self._hosts_list.iteritems():
+ for k in self._hosts_list:
if k.startswith(host_name):
return k
return None
def set_fake_storage(self, fake):
self.fake_storage = fake
- def _run_ssh(self, cmd, check_exit_code=True):
+ def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
try:
LOG.debug(_('Run CLI command: %s') % cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
return ret
-class StorwizeSVCFakeSock:
- def settimeout(self, time):
- return
-
-
class StorwizeSVCDriverTestCase(test.TestCase):
def setUp(self):
super(StorwizeSVCDriverTestCase, self).setUp()
self.driver.db = StorwizeSVCFakeDB()
self.driver.do_setup(None)
self.driver.check_for_setup_error()
- self.stubs.Set(storwize_svc.time, 'sleep', lambda s: None)
- self.stubs.Set(greenthread, 'sleep', lambda *x, **y: None)
- self.stubs.Set(storwize_svc, 'CHECK_FCMAPPING_INTERVAL', 0)
+ self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
+ self.sleeppatch.start()
+ self.driver._helpers.check_fcmapping_interval = 0
+
+
+    def tearDown(self):
+        # setUp starts sleeppatch unconditionally, so stop it
+        # unconditionally as well to avoid leaking the active patch.
+        self.sleeppatch.stop()
+        super(StorwizeSVCDriverTestCase, self).tearDown()
def _set_flag(self, flag, value):
group = self.driver.configuration.config_group
self._set_flag(k, v)
def _assert_vol_exists(self, name, exists):
- is_vol_defined = self.driver._is_vdisk_defined(name)
+ is_vol_defined = self.driver._helpers.is_vdisk_defined(name)
self.assertEqual(is_vol_defined, exists)
def test_storwize_svc_connectivity(self):
volume['volume_type'] = type_ref
self.driver.create_volume(volume)
- attrs = self.driver._get_vdisk_attributes(volume['name'])
+ attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.driver.delete_volume(volume)
volume_types.destroy(ctxt, type_ref['id'])
return attrs
- def _fail_prepare_fc_map(self, fc_map_id, source, target):
- raise processutils.ProcessExecutionError(exit_code=1,
- stdout='',
- stderr='unit-test-fail',
- cmd='prestartfcmap id')
-
def test_storwize_svc_snapshots(self):
vol1 = self._generate_vol_info(None, None)
self.driver.create_volume(vol1)
# Test timeout and volume cleanup
self._set_flag('storwize_svc_flashcopy_timeout', 1)
- self.assertRaises(exception.InvalidSnapshot,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot, snap1)
self._assert_vol_exists(snap1['name'], False)
self._reset_flags()
- # Test prestartfcmap, startfcmap, and rmfcmap failing
- orig = self.driver._call_prepare_fc_map
- self.driver._call_prepare_fc_map = self._fail_prepare_fc_map
- self.assertRaises(processutils.ProcessExecutionError,
- self.driver.create_snapshot, snap1)
- self.driver._call_prepare_fc_map = orig
+ # Test prestartfcmap failing
+ with mock.patch.object(ssh.StorwizeSSH, 'prestartfcmap') as prestart:
+ prestart.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.create_snapshot, snap1)
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.sim.error_injection('startfcmap', 'bad_id')
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap1)
self._assert_vol_exists(snap1['name'], False)
self.sim.error_injection('prestartfcmap', 'bad_id')
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap1)
self._assert_vol_exists(snap1['name'], False)
# Try to create a snapshot from an non-existing volume - should fail
snap_novol = self._generate_vol_info('undefined-vol', '12345')
- self.assertRaises(exception.VolumeNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot,
snap_novol)
# Try to create a volume from a non-existing snapshot
snap_novol = self._generate_vol_info('undefined-vol', '12345')
vol_novol = self._generate_vol_info(None, None)
- self.assertRaises(exception.SnapshotNotFound,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume_from_snapshot,
vol_novol,
snap_novol)
# Fail the snapshot
- orig = self.driver._call_prepare_fc_map
- self.driver._call_prepare_fc_map = self._fail_prepare_fc_map
- self.assertRaises(processutils.ProcessExecutionError,
- self.driver.create_volume_from_snapshot,
- vol2, snap1)
- self.driver._call_prepare_fc_map = orig
- self._assert_vol_exists(vol2['name'], False)
+ with mock.patch.object(ssh.StorwizeSSH, 'prestartfcmap') as prestart:
+ prestart.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.create_volume_from_snapshot,
+ vol2, snap1)
+ self._assert_vol_exists(vol2['name'], False)
# Try to create where source size != target size
vol2['size'] += 1
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume_from_snapshot,
vol2, snap1)
self._assert_vol_exists(vol2['name'], False)
# Try to clone where source size != target size
vol3['size'] += 1
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.create_cloned_volume,
vol3, vol2)
self._assert_vol_exists(vol3['name'], False)
self.driver.remove_export(None, volume)
# Make sure volume attributes are as they should be
- attributes = self.driver._get_vdisk_attributes(volume['name'])
+ attributes = self.driver._helpers.get_vdisk_attributes(volume['name'])
attr_size = float(attributes['capacity']) / units.GiB # bytes to GB
self.assertEqual(attr_size, float(volume['size']))
pool = self.driver.configuration.local_conf.storwize_svc_volpool_name
self.assertEqual(attributes['mdisk_grp_name'], pool)
# Try to create the volume again (should fail)
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
volume)
self.driver.create_volume(volume1)
self._assert_vol_exists(volume1['name'], True)
- self.assertRaises(exception.NoValidHost,
- self.driver._connector_to_hostname_prefix,
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver._helpers.create_host,
{'host': 12345})
- # Add a a host first to make life interesting (this host and
+ # Add a host first to make life interesting (this host and
# conn['host'] should be translated to the same prefix, and the
# initiator should differentiate
tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id,
'ip': '10.10.10.10',
'host': u'unicode.foo}.bar{.baz-%s' % rand_id}
- self.driver._create_host(tmpconn1)
+ self.driver._helpers.create_host(tmpconn1)
# Add a host with a different prefix
tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id,
'ip': '10.10.10.11',
'host': u'unicode.hello.world-%s' % rand_id}
- self.driver._create_host(tmpconn2)
+ self.driver._helpers.create_host(tmpconn2)
conn = {'initiator': u'unicode:initiator3.%s' % rand_id,
'ip': '10.10.10.12',
'host': u'unicode.foo}.bar}.baz-%s' % rand_id}
self.driver.initialize_connection(volume1, conn)
- host_name = self.driver._get_host_from_connector(conn)
+ host_name = self.driver._helpers.get_host_from_connector(conn)
self.assertIsNotNone(host_name)
self.driver.terminate_connection(volume1, conn)
- host_name = self.driver._get_host_from_connector(conn)
+ host_name = self.driver._helpers.get_host_from_connector(conn)
self.assertIsNone(host_name)
self.driver.delete_volume(volume1)
# Clean up temporary hosts
for tmpconn in [tmpconn1, tmpconn2]:
- host_name = self.driver._get_host_from_connector(tmpconn)
+ host_name = self.driver._helpers.get_host_from_connector(tmpconn)
self.assertIsNotNone(host_name)
- self.driver._delete_host(host_name)
+ self.driver._helpers.delete_host(host_name)
def test_storwize_svc_validate_connector(self):
conn_neither = {'host': 'host'}
conn_fc = {'host': 'host', 'wwpns': 'bar'}
conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
- self.driver._enabled_protocols = set(['iSCSI'])
+ self.driver._state['enabled_protocols'] = set(['iSCSI'])
self.driver.validate_connector(conn_iscsi)
self.driver.validate_connector(conn_both)
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.validate_connector, conn_fc)
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.validate_connector, conn_neither)
- self.driver._enabled_protocols = set(['FC'])
+ self.driver._state['enabled_protocols'] = set(['FC'])
self.driver.validate_connector(conn_fc)
self.driver.validate_connector(conn_both)
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.validate_connector, conn_iscsi)
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.validate_connector, conn_neither)
- self.driver._enabled_protocols = set(['iSCSI', 'FC'])
+ self.driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
self.driver.validate_connector(conn_iscsi)
self.driver.validate_connector(conn_fc)
self.driver.validate_connector(conn_both)
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.validate_connector, conn_neither)
def test_storwize_svc_host_maps(self):
# Check case where no hosts exist
if self.USESIM:
- ret = self.driver._get_host_from_connector(self._connector)
+ ret = self.driver._helpers.get_host_from_connector(
+ self._connector)
self.assertIsNone(ret)
# Make sure that the volumes have been created
self.driver.initialize_connection(volume1, self._connector)
# Try to delete the 1st volume (should fail because it is mapped)
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
volume1)
self.driver.terminate_connection(volume1, self._connector)
if self.USESIM:
- ret = self.driver._get_host_from_connector(self._connector)
+ ret = self.driver._helpers.get_host_from_connector(
+ self._connector)
self.assertIsNone(ret)
# Check cases with no auth set for host
init_ret = self.driver.initialize_connection(volume1,
conn_na)
host_name = self.sim._host_in_list(conn_na['host'])
- chap_ret = self.driver._get_chap_secret_for_host(host_name)
+ chap_ret = self.driver._helpers.get_chap_secret_for_host(
+ host_name)
if auth_enabled or host_exists == 'yes-auth':
self.assertIn('auth_password', init_ret['data'])
self.assertIsNotNone(chap_ret)
conn_no_exist = self._connector.copy()
conn_no_exist['initiator'] = 'i_dont_exist'
conn_no_exist['wwpns'] = ['0000000000000000']
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.terminate_connection,
volume1,
conn_no_exist)
self._assert_vol_exists(volume1['name'], False)
# Make sure our host still exists
- host_name = self.driver._get_host_from_connector(self._connector)
+ host_name = self.driver._helpers.get_host_from_connector(
+ self._connector)
self.assertIsNotNone(host_name)
# Remove the mapping from the 2nd volume. The host should
# specified (see bug #1244257)
fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
self.driver.initialize_connection(volume2, self._connector)
- host_name = self.driver._get_host_from_connector(self._connector)
+ host_name = self.driver._helpers.get_host_from_connector(
+ self._connector)
self.assertIsNotNone(host_name)
self.driver.terminate_connection(volume2, fake_conn)
- host_name = self.driver._get_host_from_connector(self._connector)
+ host_name = self.driver._helpers.get_host_from_connector(
+ self._connector)
self.assertIsNone(host_name)
self.driver.delete_volume(volume2)
self._assert_vol_exists(volume2['name'], False)
# Check if our host still exists (it should not)
if self.USESIM:
- ret = self.driver._get_host_from_connector(self._connector)
+ ret = self.driver._helpers.get_host_from_connector(self._connector)
self.assertIsNone(ret)
def test_storwize_svc_multi_host_maps(self):
if self.USESIM and False:
snap = self._generate_vol_info(master['name'], master['id'])
self.sim.error_injection('startfcmap', 'bad_id')
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap)
self._assert_vol_exists(snap['name'], False)
volfs = self._generate_vol_info(None, None)
self.sim.error_injection('startfcmap', 'bad_id')
self.sim.error_injection('lsfcmap', 'speed_up')
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volfs, snap)
self._assert_vol_exists(volfs['name'], False)
clone = self._generate_vol_info(None, None)
self.sim.error_injection('startfcmap', 'bad_id')
self.sim.error_injection('lsfcmap', 'speed_up')
- self.assertRaises(processutils.ProcessExecutionError,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
clone, volfs)
self._assert_vol_exists(clone['name'], False)
volume = self._generate_vol_info(None, None)
self.driver.db.volume_set(volume)
self.driver.create_volume(volume)
- stats = self.driver.extend_volume(volume, '13')
- attrs = self.driver._get_vdisk_attributes(volume['name'])
+ self.driver.extend_volume(volume, '13')
+ attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
vol_size = int(attrs['capacity']) / units.GiB
self.assertAlmostEqual(vol_size, 13)
snap = self._generate_vol_info(volume['name'], volume['id'])
self.driver.create_snapshot(snap)
self._assert_vol_exists(snap['name'], True)
- self.assertRaises(exception.VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeDriverException,
self.driver.extend_volume, volume, '16')
self.driver.delete_snapshot(snap)
self._check_loc_info(cap, {'moved': False, 'model_update': None})
def test_storwize_svc_migrate_same_extent_size(self):
- def _copy_info_exc(self, name):
- raise Exception('should not be called')
-
- self.stubs.Set(self.driver, '_get_vdisk_copy_info', _copy_info_exc)
- self.driver.do_setup(None)
- loc = 'StorwizeSVCDriver:' + self.driver._system_id + ':openstack2'
- cap = {'location_info': loc, 'extent_size': '256'}
- host = {'host': 'foo', 'capabilities': cap}
- ctxt = context.get_admin_context()
- volume = self._generate_vol_info(None, None)
- volume['volume_type_id'] = None
- self.driver.create_volume(volume)
- self.driver.migrate_volume(ctxt, volume, host)
- self.driver.delete_volume(volume)
+ # Make sure we don't call migrate_volume_vdiskcopy
+ with mock.patch.object(self.driver._helpers,
+ 'migrate_volume_vdiskcopy') as migr_vdiskcopy:
+ migr_vdiskcopy.side_effect = KeyError
+ self.driver.do_setup(None)
+ loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+ ':openstack2')
+ cap = {'location_info': loc, 'extent_size': '256'}
+ host = {'host': 'foo', 'capabilities': cap}
+ ctxt = context.get_admin_context()
+ volume = self._generate_vol_info(None, None)
+ volume['volume_type_id'] = None
+ self.driver.create_volume(volume)
+ self.driver.migrate_volume(ctxt, volume, host)
+ self.driver.delete_volume(volume)
def test_storwize_svc_migrate_diff_extent_size(self):
self.driver.do_setup(None)
- loc = 'StorwizeSVCDriver:' + self.driver._system_id + ':openstack3'
+ loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+ ':openstack3')
cap = {'location_info': loc, 'extent_size': '128'}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
volume = self._generate_vol_info(None, None)
volume['volume_type_id'] = None
self.driver.create_volume(volume)
- self.assertNotEqual(cap['extent_size'], self.driver._extent_size)
+ self.assertNotEqual(cap['extent_size'],
+ self.driver._state['extent_size'])
self.driver.migrate_volume(ctxt, volume, host)
- attrs = self.driver._get_vdisk_attributes(volume['name'])
- self.assertEqual('openstack3', attrs['mdisk_grp_name'], 'migrate '
- 'with diff extent size failed')
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertIn('openstack3', attrs['mdisk_grp_name'])
self.driver.delete_volume(volume)
def test_storwize_svc_retype_no_copy(self):
self.driver.do_setup(None)
- loc = 'StorwizeSVCDriver:' + self.driver._system_id + ':openstack'
+ loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+ ':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
- diff, equel = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+ diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = self._generate_vol_info(None, None)
self.driver.create_volume(volume)
self.driver.retype(ctxt, volume, new_type, diff, host)
- attrs = self.driver._get_vdisk_attributes(volume['name'])
+ attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertEqual('on', attrs['easy_tier'], 'Volume retype failed')
self.assertEqual('5', attrs['warning'], 'Volume retype failed')
self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed')
def test_storwize_svc_retype_only_change_iogrp(self):
self.driver.do_setup(None)
- loc = 'StorwizeSVCDriver:' + self.driver._system_id + ':openstack'
+ loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+ ':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.driver.create_volume(volume)
self.driver.retype(ctxt, volume, new_type, diff, host)
- attrs = self.driver._get_vdisk_attributes(volume['name'])
+ attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertEqual('1', attrs['IO_group_id'], 'Volume retype '
'failed')
self.driver.delete_volume(volume)
def test_storwize_svc_retype_need_copy(self):
self.driver.do_setup(None)
- loc = 'StorwizeSVCDriver:' + self.driver._system_id + ':openstack'
+ loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+ ':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.driver.create_volume(volume)
self.driver.retype(ctxt, volume, new_type, diff, host)
- attrs = self.driver._get_vdisk_attributes(volume['name'])
- self.assertEqual('no', attrs['compressed_copy'], 'Volume retype '
- 'failed')
+ attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+ self.assertEqual('no', attrs['compressed_copy'])
self.driver.delete_volume(volume)
def test_set_storage_code_level_success(self):
- code_level = '7.2.0.0 (build 87.0.1311291000)'
- res = self.driver._get_code_level(code_level)
- self.assertEqual((7, 2, 0, 0), res, 'Get code level error')
+ res = self.driver._helpers.get_system_info()
+ self.assertEqual((7, 2, 0, 0), res['code_level'],
+ 'Get code level error')
class CLIResponseTestCase(test.TestCase):
def test_empty(self):
- self.assertEqual(0, len(storwize_svc.CLIResponse('')))
- self.assertEqual(0, len(storwize_svc.CLIResponse(('', 'stderr'))))
+ self.assertEqual(0, len(ssh.CLIResponse('')))
+ self.assertEqual(0, len(ssh.CLIResponse(('', 'stderr'))))
def test_header(self):
raw = r'''id!name
1!node1
2!node2
'''
- resp = storwize_svc.CLIResponse(raw, with_header=True)
+ resp = ssh.CLIResponse(raw, with_header=True)
self.assertEqual(2, len(resp))
self.assertEqual('1', resp[0]['id'])
self.assertEqual('2', resp[1]['id'])
home address!s3
home address!s4
'''
- resp = storwize_svc.CLIResponse(raw, with_header=False)
+ resp = ssh.CLIResponse(raw, with_header=False)
self.assertEqual(list(resp.select('home address', 'name',
'home address')),
[('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'),
1!node1!!500507680200C744!online
2!node2!!500507680200C745!online
'''
- resp = storwize_svc.CLIResponse(raw)
+ resp = ssh.CLIResponse(raw)
self.assertEqual(2, len(resp))
self.assertEqual('1', resp[0]['id'])
self.assertEqual('500507680200C744', resp[0]['WWNN'])
port_status!inactive
port_speed!8Gb
'''
- resp = storwize_svc.CLIResponse(raw, with_header=False)
+ resp = ssh.CLIResponse(raw, with_header=False)
self.assertEqual(1, len(resp))
self.assertEqual('1', resp[0]['id'])
self.assertEqual(list(resp.select('port_id', 'port_status')),
--- /dev/null
+# Copyright 2013 IBM Corp.
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""
+Volume driver for IBM Storwize family and SVC storage systems.
+
+Notes:
+1. If you specify both a password and a key file, this driver will use the
+ key file only.
+2. When using a key file for authentication, it is up to the user or
+ system administrator to store the private key in a safe manner.
+3. The defaults for creating volumes are "-rsize 2% -autoexpand
+ -grainsize 256 -warning 0". These can be changed in the configuration
+ file or by using volume types(recommended only for advanced users).
+
+Limitations:
+1. The driver expects CLI output in English, error messages may be in a
+ localized format.
+2. Clones and creating volumes from snapshots, where the source and target
+ are of different sizes, is not supported.
+
+"""
+
+from oslo.config import cfg
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder import units
+from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
+from cinder.volume.drivers.san import san
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+# Driver-specific configuration options.  Registered on the global CONF
+# object below and also appended to the per-backend configuration in
+# StorwizeSVCDriver.__init__.
+storwize_svc_opts = [
+    cfg.StrOpt('storwize_svc_volpool_name',
+               default='volpool',
+               help='Storage system storage pool for volumes'),
+    cfg.IntOpt('storwize_svc_vol_rsize',
+               default=2,
+               help='Storage system space-efficiency parameter for volumes '
+                    '(percentage)'),
+    cfg.IntOpt('storwize_svc_vol_warning',
+               default=0,
+               help='Storage system threshold for volume capacity warnings '
+                    '(percentage)'),
+    cfg.BoolOpt('storwize_svc_vol_autoexpand',
+                default=True,
+                help='Storage system autoexpand parameter for volumes '
+                     '(True/False)'),
+    cfg.IntOpt('storwize_svc_vol_grainsize',
+               default=256,
+               help='Storage system grain size parameter for volumes '
+                    '(32/64/128/256)'),
+    cfg.BoolOpt('storwize_svc_vol_compression',
+                default=False,
+                help='Storage system compression option for volumes'),
+    cfg.BoolOpt('storwize_svc_vol_easytier',
+                default=True,
+                help='Enable Easy Tier for volumes'),
+    cfg.IntOpt('storwize_svc_vol_iogrp',
+               default=0,
+               help='The I/O group in which to allocate volumes'),
+    cfg.IntOpt('storwize_svc_flashcopy_timeout',
+               default=120,
+               help='Maximum number of seconds to wait for FlashCopy to be '
+                    'prepared. Maximum value is 600 seconds (10 minutes)'),
+    cfg.StrOpt('storwize_svc_connection_protocol',
+               default='iSCSI',
+               help='Connection protocol (iSCSI/FC)'),
+    cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
+                default=True,
+                help='Configure CHAP authentication for iSCSI connections '
+                     '(Default: Enabled)'),
+    cfg.BoolOpt('storwize_svc_multipath_enabled',
+                default=False,
+                help='Connect with multipath (FC only; iSCSI multipath is '
+                     'controlled by Nova)'),
+    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
+                default=True,
+                help='Allows vdisk to multi host mapping'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(storwize_svc_opts)
+
+
+class StorwizeSVCDriver(san.SanDriver):
+    """IBM Storwize V7000 and SVC iSCSI/FC volume driver.
+
+    Version history:
+    1.0 - Initial driver
+    1.1 - FC support, create_cloned_volume, volume type support,
+          get_volume_stats, minor bug fixes
+    1.2.0 - Added retype
+    1.2.1 - Code refactor, improved exception handling
+    """
+
+    # Reported to the scheduler via get_volume_stats()['driver_version'].
+    VERSION = "1.2.1"
+
+    def __init__(self, *args, **kwargs):
+        """Initialize the driver: register options and prepare empty state.
+
+        self._state caches array-wide facts that do_setup() fills in by
+        querying the storage system; None / empty values mean "not yet
+        discovered".
+        """
+        super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(storwize_svc_opts)
+        # All SSH/CLI interaction is delegated to the helpers object.
+        self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
+        self._state = {'storage_nodes': {},
+                       'enabled_protocols': set(),
+                       'compression_enabled': False,
+                       'available_iogrps': [],
+                       'system_name': None,
+                       'system_id': None,
+                       'extent_size': None,
+                       'code_level': None,
+                       }
+
+    def do_setup(self, ctxt):
+        """Check that we have all configuration details from the storage.
+
+        Runs once at service start; populates self._state by querying the
+        array, and raises if the configured pool or nodes are unusable.
+        """
+        LOG.debug(_('enter: do_setup'))
+
+        # Get storage system name, id, and code level
+        self._state.update(self._helpers.get_system_info())
+
+        # Validate that the pool exists
+        pool = self.configuration.storwize_svc_volpool_name
+        try:
+            attributes = self._helpers.get_pool_attrs(pool)
+        except exception.VolumeBackendAPIException:
+            msg = _('Failed getting details for pool %s') % pool
+            raise exception.InvalidInput(reason=msg)
+        self._state['extent_size'] = attributes['extent_size']
+
+        # Check if compression is supported
+        self._state['compression_enabled'] = \
+            self._helpers.compression_enabled()
+
+        # Get the available I/O groups
+        self._state['available_iogrps'] = \
+            self._helpers.get_available_io_groups()
+
+        # Get the iSCSI and FC names of the Storwize/SVC nodes
+        self._state['storage_nodes'] = self._helpers.get_node_info()
+
+        # Add the iSCSI IP addresses and WWPNs to the storage node info
+        self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
+        self._helpers.add_fc_wwpns(self._state['storage_nodes'])
+
+        # For each node, check what connection modes it supports.  Delete any
+        # nodes that do not support any types (may be partially configured).
+        to_delete = []
+        for k, node in self._state['storage_nodes'].iteritems():
+            # iSCSI needs at least one IP address plus an IQN on the node.
+            if ((len(node['ipv4']) or len(node['ipv6']))
+                    and len(node['iscsi_name'])):
+                node['enabled_protocols'].append('iSCSI')
+                self._state['enabled_protocols'].add('iSCSI')
+            if len(node['WWPN']):
+                node['enabled_protocols'].append('FC')
+                self._state['enabled_protocols'].add('FC')
+            if not len(node['enabled_protocols']):
+                to_delete.append(k)
+        for delkey in to_delete:
+            del self._state['storage_nodes'][delkey]
+
+        # Make sure we have at least one node configured
+        if not len(self._state['storage_nodes']):
+            msg = _('do_setup: No configured nodes.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+        LOG.debug(_('leave: do_setup'))
+
+    def check_for_setup_error(self):
+        """Ensure that the flags are set properly.
+
+        Validates both the state discovered by do_setup() and the static
+        configuration; raises InvalidInput / VolumeBackendAPIException on
+        any problem.
+        """
+        LOG.debug(_('enter: check_for_setup_error'))
+
+        # Check that we have the system ID information
+        if self._state['system_name'] is None:
+            exception_msg = (_('Unable to determine system name'))
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+        if self._state['system_id'] is None:
+            exception_msg = (_('Unable to determine system id'))
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+        if self._state['extent_size'] is None:
+            exception_msg = (_('Unable to determine pool extent size'))
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+        required_flags = ['san_ip', 'san_ssh_port', 'san_login',
+                          'storwize_svc_volpool_name']
+        for flag in required_flags:
+            if not self.configuration.safe_get(flag):
+                raise exception.InvalidInput(reason=_('%s is not set') % flag)
+
+        # Ensure that either password or keyfile were set
+        if not (self.configuration.san_password or
+                self.configuration.san_private_key):
+            raise exception.InvalidInput(
+                reason=_('Password or SSH private key is required for '
+                         'authentication: set either san_password or '
+                         'san_private_key option'))
+
+        # Check that flashcopy_timeout is not more than 10 minutes
+        flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout
+        if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600):
+            raise exception.InvalidInput(
+                reason=_('Illegal value %d specified for '
+                         'storwize_svc_flashcopy_timeout: '
+                         'valid values are between 0 and 600')
+                % flashcopy_timeout)
+
+        # Validate the default vdisk creation options against array state
+        # (e.g. compression requested but not licensed).
+        opts = self._helpers.build_default_opts(self.configuration)
+        self._helpers.check_vdisk_opts(self._state, opts)
+
+        LOG.debug(_('leave: check_for_setup_error'))
+
+    def ensure_export(self, ctxt, volume):
+        """Check that the volume exists on the storage.
+
+        The system does not "export" volumes as a Linux iSCSI target does,
+        and therefore we just check that the volume exists on the storage.
+        Logs (but does not raise) if the vdisk is missing.
+        """
+        volume_defined = self._helpers.is_vdisk_defined(volume['name'])
+        if not volume_defined:
+            LOG.error(_('ensure_export: Volume %s not found on storage')
+                      % volume['name'])
+
+    def create_export(self, ctxt, volume):
+        # Nothing to do: mappings are created lazily in
+        # initialize_connection(), so there is no model update.
+        model_update = None
+        return model_update
+
+    def remove_export(self, ctxt, volume):
+        # No-op: unmapping happens in terminate_connection().
+        pass
+
+    def validate_connector(self, connector):
+        """Check connector for at least one enabled protocol (iSCSI/FC).
+
+        Raises VolumeDriverException if the connector has neither an iSCSI
+        initiator nor FC wwpns usable with this backend.
+        """
+        valid = False
+        if ('iSCSI' in self._state['enabled_protocols'] and
+                'initiator' in connector):
+            valid = True
+        if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
+            valid = True
+        if not valid:
+            msg = (_('The connector does not contain the required '
+                     'information.'))
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def _get_vdisk_params(self, type_id, volume_type=None):
+        # Thin wrapper: merge configured defaults with any volume-type
+        # extra specs for the given type.
+        return self._helpers.get_vdisk_params(self.configuration, self._state,
+                                              type_id, volume_type=volume_type)
+
+    def initialize_connection(self, volume, connector):
+        """Perform the necessary work so that an iSCSI/FC connection can
+        be made.
+
+        To be able to create an iSCSI/FC connection from a given host to a
+        volume, we must:
+        1. Translate the given iSCSI name or WWNN to a host name
+        2. Create new host on the storage system if it does not yet exist
+        3. Map the volume to the host if it is not already done
+        4. Return the connection information for relevant nodes (in the
+           proper I/O group)
+
+        :param volume: volume dict to connect
+        :param connector: dict describing the initiating host
+        :returns: dict with 'driver_volume_type' and protocol 'data'
+        """
+
+        LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
+                    'connector %(conn)s') % {'vol': str(volume),
+                                             'conn': str(connector)})
+
+        vol_opts = self._get_vdisk_params(volume['volume_type_id'])
+        volume_name = volume['name']
+
+        # Check if a host object is defined for this host name.
+        # (The previous assignment of host_name from connector['host'] was
+        # dead code - the helper lookup below is authoritative.)
+        host_name = self._helpers.get_host_from_connector(connector)
+        if host_name is None:
+            # Host does not exist - add a new host to Storwize/SVC
+            host_name = self._helpers.create_host(connector)
+
+        if vol_opts['protocol'] == 'iSCSI':
+            chap_secret = self._helpers.get_chap_secret_for_host(host_name)
+            chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
+            if chap_enabled and chap_secret is None:
+                chap_secret = self._helpers.add_chap_secret_to_host(host_name)
+            elif not chap_enabled and chap_secret:
+                LOG.warning(_('CHAP secret exists for host but CHAP is '
+                              'disabled'))
+
+        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
+        if volume_attributes is None:
+            msg = (_('initialize_connection: Failed to get attributes'
+                     ' for volume %s') % volume_name)
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
+        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
+                                               multihostmap)
+        try:
+            preferred_node = volume_attributes['preferred_node_id']
+            IO_group = volume_attributes['IO_group_id']
+        except KeyError as e:
+            LOG.error(_('Did not find expected column name in '
+                        'lsvdisk: %s') % str(e))
+            msg = (_('initialize_connection: Missing volume '
+                     'attribute for volume %s') % volume_name)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        try:
+            # Get preferred node and other nodes in I/O group
+            preferred_node_entry = None
+            io_group_nodes = []
+            for node in self._state['storage_nodes'].itervalues():
+                if vol_opts['protocol'] not in node['enabled_protocols']:
+                    continue
+                if node['id'] == preferred_node:
+                    preferred_node_entry = node
+                if node['IO_group'] == IO_group:
+                    io_group_nodes.append(node)
+
+            if not len(io_group_nodes):
+                msg = (_('initialize_connection: No node found in '
+                         'I/O group %(gid)s for volume %(vol)s') %
+                       {'gid': IO_group, 'vol': volume_name})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            if not preferred_node_entry and not vol_opts['multipath']:
+                # Get 1st node in I/O group
+                preferred_node_entry = io_group_nodes[0]
+                LOG.warn(_('initialize_connection: Did not find a preferred '
+                           'node for volume %s') % volume_name)
+
+            properties = {}
+            properties['target_discovered'] = False
+            properties['target_lun'] = lun_id
+            properties['volume_id'] = volume['id']
+            if vol_opts['protocol'] == 'iSCSI':
+                type_str = 'iscsi'
+                # Prefer IPv4; fall back to IPv6 (one of the two is
+                # guaranteed non-empty by do_setup's protocol filtering).
+                if len(preferred_node_entry['ipv4']):
+                    ipaddr = preferred_node_entry['ipv4'][0]
+                else:
+                    ipaddr = preferred_node_entry['ipv6'][0]
+                properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
+                properties['target_iqn'] = preferred_node_entry['iscsi_name']
+                if chap_secret:
+                    properties['auth_method'] = 'CHAP'
+                    properties['auth_username'] = connector['initiator']
+                    properties['auth_password'] = chap_secret
+            else:
+                type_str = 'fibre_channel'
+                conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
+                if len(conn_wwpns) == 0:
+                    msg = (_('Could not get FC connection information for the '
+                             'host-volume connection. Is the host configured '
+                             'properly for FC connections?'))
+                    LOG.error(msg)
+                    raise exception.VolumeBackendAPIException(data=msg)
+                if not vol_opts['multipath']:
+                    if preferred_node_entry['WWPN'] in conn_wwpns:
+                        properties['target_wwn'] = preferred_node_entry['WWPN']
+                    else:
+                        properties['target_wwn'] = conn_wwpns[0]
+                else:
+                    properties['target_wwn'] = conn_wwpns
+        except Exception:
+            # Undo the mapping before re-raising so we don't leak a
+            # half-initialized host mapping.
+            with excutils.save_and_reraise_exception():
+                self.terminate_connection(volume, connector)
+                LOG.error(_('initialize_connection: Failed to collect return '
+                            'properties for volume %(vol)s and connector '
+                            '%(conn)s.\n') % {'vol': str(volume),
+                                              'conn': str(connector)})
+
+        LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
+                    'connector %(conn)s\n properties: %(prop)s')
+                  % {'vol': str(volume),
+                     'conn': str(connector),
+                     'prop': str(properties)})
+
+        return {'driver_volume_type': type_str, 'data': properties, }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Cleanup after an iSCSI connection has been terminated.
+
+        When we clean up a terminated connection between a given connector
+        and volume, we:
+        1. Translate the given connector to a host name
+        2. Remove the volume-to-host mapping if it exists
+        3. Delete the host if it has no more mappings (hosts are created
+           automatically by this driver when mappings are created)
+        """
+        LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
+                    'connector %(conn)s') % {'vol': str(volume),
+                                             'conn': str(connector)})
+
+        vol_name = volume['name']
+        if 'host' in connector:
+            host_name = self._helpers.get_host_from_connector(connector)
+            if host_name is None:
+                msg = (_('terminate_connection: Failed to get host name from'
+                         ' connector.'))
+                LOG.error(msg)
+                raise exception.VolumeDriverException(message=msg)
+        else:
+            # See bug #1244257 - a None host name tells the helper to
+            # remove the mapping from every host the vdisk is mapped to.
+            host_name = None
+
+        self._helpers.unmap_vol_from_host(vol_name, host_name)
+
+        LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
+                    'connector %(conn)s') % {'vol': str(volume),
+                                             'conn': str(connector)})
+
+    def create_volume(self, volume):
+        """Create a vdisk in the configured pool using type-derived opts."""
+        opts = self._get_vdisk_params(volume['volume_type_id'])
+        pool = self.configuration.storwize_svc_volpool_name
+        return self._helpers.create_vdisk(volume['name'], str(volume['size']),
+                                          'gb', pool, opts)
+
+    def delete_volume(self, volume):
+        # force=False: fail rather than break existing FlashCopy mappings.
+        self._helpers.delete_vdisk(volume['name'], False)
+
+    def create_snapshot(self, snapshot):
+        """Create a point-in-time FlashCopy of the source volume."""
+        ctxt = context.get_admin_context()
+        # Use admin context: the source volume may belong to another tenant.
+        source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
+        opts = self._get_vdisk_params(source_vol['volume_type_id'])
+        self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
+                                  snapshot['volume_id'], self.configuration,
+                                  opts, False)
+
+    def delete_snapshot(self, snapshot):
+        # Snapshots are plain vdisks on the array; same non-forced delete.
+        self._helpers.delete_vdisk(snapshot['name'], False)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a volume as a FlashCopy of a snapshot (same size only)."""
+        if volume['size'] != snapshot['volume_size']:
+            msg = (_('create_volume_from_snapshot: Source and destination '
+                     'size differ.'))
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        opts = self._get_vdisk_params(volume['volume_type_id'])
+        self._helpers.create_copy(snapshot['name'], volume['name'],
+                                  snapshot['id'], self.configuration,
+                                  opts, True)
+
+    def create_cloned_volume(self, tgt_volume, src_volume):
+        """Clone a volume via FlashCopy (source and target sizes must match)."""
+        if src_volume['size'] != tgt_volume['size']:
+            msg = (_('create_cloned_volume: Source and destination '
+                     'size differ.'))
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        opts = self._get_vdisk_params(tgt_volume['volume_type_id'])
+        self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
+                                  src_volume['id'], self.configuration,
+                                  opts, True)
+
+    def extend_volume(self, volume, new_size):
+        """Grow an existing vdisk to new_size (GB).
+
+        Refuses to extend a volume that has FlashCopy mappings (snapshots),
+        since expandvdisksize cannot run with active mappings.
+        """
+        LOG.debug(_('enter: extend_volume: volume %s') % volume['id'])
+        ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
+                                                        allow_snaps=False)
+        if not ret:
+            msg = (_('extend_volume: Extending a volume with snapshots is not '
+                     'supported.'))
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        extend_amt = int(new_size) - volume['size']
+        self._helpers.extend_vdisk(volume['name'], extend_amt)
+        LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])
+
+    def migrate_volume(self, ctxt, volume, host):
+        """Migrate directly if source and dest are managed by same storage.
+
+        The method uses the migratevdisk method, which returns almost
+        immediately, if the source and target pools have the same extent_size.
+        Otherwise, it uses addvdiskcopy and rmvdiskcopy, which require waiting
+        for the copy operation to complete.
+
+        :param ctxt: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        :returns: (False, None) if this driver cannot migrate, else
+                  (True, None)
+        """
+        LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s') %
+                  {'id': volume['id'], 'host': host['host']})
+
+        false_ret = (False, None)
+        # can_migrate_to_host returns the destination pool only when the
+        # target is the same backend system; otherwise we punt to generic
+        # (host-assisted) migration by returning False.
+        dest_pool = self._helpers.can_migrate_to_host(host, self._state)
+        if dest_pool is None:
+            return false_ret
+
+        if 'extent_size' not in host['capabilities']:
+            return false_ret
+        if host['capabilities']['extent_size'] == self._state['extent_size']:
+            # If source and dest pools have the same extent size, migratevdisk
+            self._helpers.migrate_vdisk(volume['name'], dest_pool)
+        else:
+            # If source and dest pool extent size differ, add/delete vdisk copy
+            ctxt = context.get_admin_context()
+            if volume['volume_type_id'] is not None:
+                volume_type_id = volume['volume_type_id']
+                vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
+            else:
+                vol_type = None
+            self._helpers.migrate_volume_vdiskcopy(volume['name'], dest_pool,
+                                                   vol_type,
+                                                   self._state,
+                                                   self.configuration)
+
+        LOG.debug(_('leave: migrate_volume: id=%(id)s, host=%(host)s') %
+                  {'id': volume['id'], 'host': host['host']})
+        return (True, None)
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Convert the volume to be of the new type.
+
+        Returns a boolean indicating whether the retype occurred.
+
+        :param ctxt: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param new_type: A dictionary describing the volume type to convert to
+        :param diff: A dictionary with the difference between the two types
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        LOG.debug(_('enter: retype: id=%(id)s, new_type=%(new_type)s,'
+                    'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
+                                                       'new_type': new_type,
+                                                       'diff': diff,
+                                                       'host': host})
+
+        # Keys that can never change ('ignore'), keys changeable in place
+        # ('no_copy'), and keys that require a new vdisk copy ('copy').
+        ignore_keys = ['protocol', 'multipath']
+        no_copy_keys = ['warning', 'autoexpand', 'easytier', 'iogrp']
+        copy_keys = ['rsize', 'grainsize', 'compression']
+        all_keys = ignore_keys + no_copy_keys + copy_keys
+        old_opts = self._get_vdisk_params(volume['volume_type_id'])
+        new_opts = self._get_vdisk_params(new_type['id'],
+                                          volume_type=new_type)
+
+        vdisk_changes = []
+        need_copy = False
+        for key in all_keys:
+            if old_opts[key] != new_opts[key]:
+                if key in copy_keys:
+                    need_copy = True
+                    break
+                elif key in no_copy_keys:
+                    vdisk_changes.append(key)
+
+        # A different backend location also forces the copy path.
+        dest_location = host['capabilities'].get('location_info')
+        if self._stats['location_info'] != dest_location:
+            need_copy = True
+
+        if need_copy:
+            dest_pool = self._helpers.can_migrate_to_host(host, self._state)
+            if dest_pool is None:
+                return False
+
+            self._helpers.migrate_volume_vdiskcopy(volume['name'], dest_pool,
+                                                   new_type,
+                                                   self._state,
+                                                   self.configuration)
+        else:
+            self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
+                                               new_opts, self._state)
+
+        # Fixed log-message typo: was 'ild=%(id)s'.
+        LOG.debug(_('exit: retype: id=%(id)s, new_type=%(new_type)s,'
+                    'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
+                                                       'new_type': new_type,
+                                                       'diff': diff,
+                                                       'host': host['host']})
+        return True
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If we haven't gotten stats yet or 'refresh' is True,
+        run update the stats first.
+
+        :param refresh: force a refresh of the cached stats dict
+        :returns: dict of capability/capacity data for the scheduler
+        """
+        if not self._stats or refresh:
+            self._update_volume_stats()
+
+        return self._stats
+
+    def _update_volume_stats(self):
+        """Retrieve stats info from volume group.
+
+        Queries the configured pool and caches the result in self._stats.
+        Raises VolumeBackendAPIException if the pool cannot be queried.
+        """
+
+        LOG.debug(_("Updating volume stats"))
+        data = {}
+
+        data['vendor_name'] = 'IBM'
+        data['driver_version'] = self.VERSION
+        data['storage_protocol'] = list(self._state['enabled_protocols'])
+
+        data['total_capacity_gb'] = 0  # To be overwritten
+        data['free_capacity_gb'] = 0   # To be overwritten
+        data['reserved_percentage'] = self.configuration.reserved_percentage
+        data['QoS_support'] = False
+
+        pool = self.configuration.storwize_svc_volpool_name
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        if not backend_name:
+            backend_name = '%s_%s' % (self._state['system_name'], pool)
+        data['volume_backend_name'] = backend_name
+
+        attributes = self._helpers.get_pool_attrs(pool)
+        if not attributes:
+            LOG.error(_('Could not get pool data from the storage'))
+            exception_message = (_('_update_volume_stats: '
+                                   'Could not get storage pool data'))
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        # Pool capacity is reported in bytes; convert to GiB.
+        data['total_capacity_gb'] = (float(attributes['capacity']) /
+                                     units.GiB)
+        data['free_capacity_gb'] = (float(attributes['free_capacity']) /
+                                    units.GiB)
+        data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
+        data['compression_support'] = self._state['compression_enabled']
+        data['extent_size'] = self._state['extent_size']
+        # location_info lets retype/migrate recognize this same backend.
+        data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
+                                 {'sys_id': self._state['system_id'],
+                                  'pool': pool})
+
+        self._stats = data
--- /dev/null
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import random
+import re
+import six
+import unicodedata
+
+from eventlet import greenthread
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.openstack.common import strutils
+from cinder import utils
+from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+
+class StorwizeHelpers(object):
+    def __init__(self, run_ssh):
+        """Wrap the driver-supplied SSH runner in a CLI helper.
+
+        :param run_ssh: callable that executes a command on the array
+        """
+        self.ssh = storwize_ssh.StorwizeSSH(run_ssh)
+        # Seconds between polls when waiting on FlashCopy mapping state.
+        self.check_fcmapping_interval = 3
+
+    @staticmethod
+    def handle_keyerror(cmd, out):
+        """Raise a backend-API error for a missing key in CLI output."""
+        msg = (_('Could not find key in output of command %(cmd)s: %(out)s')
+               % {'out': out, 'cmd': cmd})
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def compression_enabled(self):
+        """Return whether or not compression is enabled for this system.
+
+        A nonzero value for either license counter means the compression
+        feature is licensed.
+        """
+        resp = self.ssh.lslicense()
+        keys = ['license_compression_enclosures',
+                'license_compression_capacity']
+        for key in keys:
+            if resp[key] != '0':
+                return True
+        return False
+
+    def get_system_info(self):
+        """Return system's name, ID, and code level.
+
+        :returns: dict with 'code_level' (tuple of 4 ints), 'system_name'
+                  and 'system_id'
+        :raises: VolumeBackendAPIException if the code level cannot be
+                 parsed from lssystem output
+        """
+        resp = self.ssh.lssystem()
+        level = resp['code_level']
+        # Match e.g. '7.2.0.0' at the start of '7.2.0.0 (build ...)'.
+        # The dots are escaped (raw string) so arbitrary characters are
+        # not accepted as separators.
+        match_obj = re.search(r'([0-9]\.){3}[0-9]', level)
+        if match_obj is None:
+            msg = _('Failed to get code level (%s).') % str(level)
+            raise exception.VolumeBackendAPIException(data=msg)
+        code_level = match_obj.group().split('.')
+        return {'code_level': tuple([int(x) for x in code_level]),
+                'system_name': resp['name'],
+                'system_id': resp['id']}
+
+    def get_pool_attrs(self, pool):
+        """Return attributes for the specified pool (lsmdiskgrp output)."""
+        return self.ssh.lsmdiskgrp(pool)
+
+    def get_available_io_groups(self):
+        """Return list of available IO groups.
+
+        An I/O group is "available" when it has at least one node.
+        """
+        iogrps = []
+        resp = self.ssh.lsiogrp()
+        for iogrp in resp:
+            try:
+                if int(iogrp['node_count']) > 0:
+                    iogrps.append(int(iogrp['id']))
+            except KeyError:
+                self.handle_keyerror('lsiogrp', str(iogrp))
+            except ValueError:
+                msg = (_('Expected integer for node_count, '
+                         'svcinfo lsiogrp returned: %(node)s') %
+                       {'node': iogrp['node_count']})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+        return iogrps
+
+    def get_node_info(self):
+        """Return dictionary containing information on system's nodes.
+
+        Only online nodes are included; keys are node ids.  The WWPN,
+        ipv4/ipv6 and enabled_protocols lists start empty and are filled
+        in later by add_fc_wwpns / add_iscsi_ip_addrs and the driver.
+        """
+        nodes = {}
+        resp = self.ssh.lsnode()
+        for node_data in resp:
+            try:
+                if node_data['status'] != 'online':
+                    continue
+                node = {}
+                node['id'] = node_data['id']
+                node['name'] = node_data['name']
+                node['IO_group'] = node_data['IO_group_id']
+                node['iscsi_name'] = node_data['iscsi_name']
+                node['WWNN'] = node_data['WWNN']
+                node['status'] = node_data['status']
+                node['WWPN'] = []
+                node['ipv4'] = []
+                node['ipv6'] = []
+                node['enabled_protocols'] = []
+                nodes[node['id']] = node
+            except KeyError:
+                self.handle_keyerror('lsnode', str(node_data))
+        return nodes
+
+    def add_iscsi_ip_addrs(self, storage_nodes):
+        """Add iSCSI IP addresses to system node information.
+
+        Mutates storage_nodes in place, appending configured/online
+        addresses from lsportip to each node's ipv4/ipv6 lists.
+        """
+        resp = self.ssh.lsportip()
+        for ip_data in resp:
+            try:
+                state = ip_data['state']
+                if ip_data['node_id'] in storage_nodes and (
+                        state == 'configured' or state == 'online'):
+                    node = storage_nodes[ip_data['node_id']]
+                    if len(ip_data['IP_address']):
+                        node['ipv4'].append(ip_data['IP_address'])
+                    if len(ip_data['IP_address_6']):
+                        node['ipv6'].append(ip_data['IP_address_6'])
+            except KeyError:
+                self.handle_keyerror('lsportip', str(ip_data))
+
+    def add_fc_wwpns(self, storage_nodes):
+        """Add FC WWPNs to system node information.
+
+        Mutates storage_nodes in place; a set is used to de-duplicate
+        port ids across repeated lsnode calls.
+        """
+        for key in storage_nodes:
+            node = storage_nodes[key]
+            resp = self.ssh.lsnode(node_id=node['id'])
+            wwpns = set(node['WWPN'])
+            for i, s in resp.select('port_id', 'port_status'):
+                if 'unconfigured' != s:
+                    wwpns.add(i)
+            node['WWPN'] = list(wwpns)
+            LOG.info(_('WWPN on node %(node)s: %(wwpn)s')
+                     % {'node': node['id'], 'wwpn': node['WWPN']})
+
+    def add_chap_secret_to_host(self, host_name):
+        """Generate and store a randomly-generated CHAP secret for the host.
+
+        :returns: the new secret, so the caller can hand it to the initiator
+        """
+        chap_secret = utils.generate_password()
+        self.ssh.add_chap_secret(chap_secret, host_name)
+        return chap_secret
+
+    def get_chap_secret_for_host(self, host_name):
+        """Return the CHAP secret configured for the host, if any.
+
+        :returns: the stored secret, or None when the host exists but has
+                  no CHAP authentication configured
+        :raises: VolumeBackendAPIException if the host is not found
+        """
+        resp = self.ssh.lsiscsiauth()
+        host_found = False
+        for host_data in resp:
+            try:
+                if host_data['name'] == host_name:
+                    host_found = True
+                    if host_data['iscsi_auth_method'] == 'chap':
+                        return host_data['iscsi_chap_secret']
+            except KeyError:
+                self.handle_keyerror('lsiscsiauth', str(host_data))
+        if not host_found:
+            msg = _('Failed to find host %s') % host_name
+            raise exception.VolumeBackendAPIException(data=msg)
+        return None
+
+    def get_conn_fc_wwpns(self, host):
+        """Return the local (array-side) WWPNs visible to the given host."""
+        wwpns = []
+        resp = self.ssh.lsfabric(host=host)
+        for wwpn in resp.select('local_wwpn'):
+            wwpns.append(wwpn)
+        return wwpns
+
+    def get_host_from_connector(self, connector):
+        """Return the Storwize host described by the connector.
+
+        :param connector: dict with optional 'wwpns' and/or 'initiator'
+        :returns: the matching host name, or None if no host matches
+        """
+        LOG.debug(_('enter: get_host_from_connector: %s') % str(connector))
+
+        # If we have FC information, we have a faster lookup option
+        host_name = None
+        if 'wwpns' in connector:
+            for wwpn in connector['wwpns']:
+                resp = self.ssh.lsfabric(wwpn=wwpn)
+                for wwpn_info in resp:
+                    try:
+                        if wwpn_info['remote_wwpn'] == wwpn:
+                            host_name = wwpn_info['name']
+                    except KeyError:
+                        self.handle_keyerror('lsfabric', str(wwpn_info))
+
+        # That didn't work, so try exhaustive search
+        if not host_name:
+            hosts_info = self.ssh.lshost()
+            for name in hosts_info.select('name'):
+                resp = self.ssh.lshost(host=name)
+                for iscsi, wwpn in resp.select('iscsi_name', 'WWPN'):
+                    if ('initiator' in connector and
+                            iscsi == connector['initiator']):
+                        host_name = name
+                    # Bug fix: was `str(x).lower` (the bound method object,
+                    # never equal to a string), which made this comparison
+                    # always False.  Call lower() to compare actual values.
+                    elif ('wwpns' in connector and
+                          len(connector['wwpns']) and
+                          wwpn.lower() in
+                          [str(x).lower() for x in connector['wwpns']]):
+                        host_name = name
+
+        LOG.debug(_('leave: get_host_from_connector: host %s') % host_name)
+        return host_name
+
+    def create_host(self, connector):
+        """Create a new host on the storage system.
+
+        We create a host name and associate it with the given connection
+        information.  The host name will be a cleaned up version of the given
+        host name (at most 55 characters), plus a random 8-character suffix to
+        avoid collisions.  The total length should be at most 63 characters.
+        """
+        LOG.debug(_('enter: create_host: host %s') % connector['host'])
+
+        # Before we start, make sure host name is a string and that we have at
+        # least one port.
+        host_name = connector['host']
+        if not isinstance(host_name, six.string_types):
+            msg = _('create_host: Host name is not unicode or string')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        ports = []
+        if 'initiator' in connector:
+            ports.append(['initiator', '%s' % connector['initiator']])
+        if 'wwpns' in connector:
+            for wwpn in connector['wwpns']:
+                ports.append(['wwpn', '%s' % wwpn])
+        if not len(ports):
+            msg = _('create_host: No initiators or wwpns supplied.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        # Build a host name for the Storwize host - first clean up the name.
+        # NOTE(review): `unicode` is Python 2 only; this normalizes non-ASCII
+        # characters to their closest ASCII form.
+        if isinstance(host_name, unicode):
+            host_name = unicodedata.normalize('NFKD', host_name).encode(
+                'ascii', 'replace').decode('ascii')
+
+        # Replace any remaining characters the CLI does not accept.
+        for num in range(0, 128):
+            ch = str(chr(num))
+            if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
+                host_name = host_name.replace(ch, '-')
+
+        # Storwize doesn't like hostname that doesn't starts with letter or _.
+        if not re.match('^[A-Za-z]', host_name):
+            host_name = '_' + host_name
+
+        # Add a random 8-character suffix to avoid collisions
+        rand_id = str(random.randint(0, 99999999)).zfill(8)
+        host_name = '%s-%s' % (host_name[:55], rand_id)
+
+        # Create a host with one port
+        port = ports.pop(0)
+        self.ssh.mkhost(host_name, port[0], port[1])
+
+        # Add any additional ports to the host
+        for port in ports:
+            self.ssh.addhostport(host_name, port[0], port[1])
+
+        LOG.debug(_('leave: create_host: host %(host)s - %(host_name)s') %
+                  {'host': connector['host'], 'host_name': host_name})
+        return host_name
+
+    def delete_host(self, host_name):
+        """Remove the given host definition from the storage system."""
+        self.ssh.rmhost(host_name)
+
+    def map_vol_to_host(self, volume_name, host_name, multihostmap):
+        """Create a mapping between a volume to a host.
+
+        Returns the SCSI LUN id of the mapping (as a string). If the volume
+        is already mapped to the host, the existing LUN id is returned.
+        """
+
+        LOG.debug(_('enter: map_vol_to_host: volume %(volume_name)s to '
+                    'host %(host_name)s')
+                  % {'volume_name': volume_name, 'host_name': host_name})
+
+        # Check if this volume is already mapped to this host
+        mapped = False
+        luns_used = []
+        result_lun = '-1'
+        resp = self.ssh.lshostvdiskmap(host_name)
+        for mapping_info in resp:
+            luns_used.append(int(mapping_info['SCSI_id']))
+            if mapping_info['vdisk_name'] == volume_name:
+                mapped = True
+                result_lun = mapping_info['SCSI_id']
+
+        if not mapped:
+            # Find unused lun
+            # The lowest free LUN is the first index where the sorted list
+            # "skips" a value; if there are no gaps, use the next value
+            # after the highest one in use (len(luns_used)).
+            luns_used.sort()
+            result_lun = str(len(luns_used))
+            for index, n in enumerate(luns_used):
+                if n > index:
+                    result_lun = str(index)
+                    break
+            self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun,
+                                    multihostmap)
+
+        LOG.debug(_('leave: map_vol_to_host: LUN %(result_lun)s, volume '
+                    '%(volume_name)s, host %(host_name)s') %
+                  {'result_lun': result_lun,
+                   'volume_name': volume_name,
+                   'host_name': host_name})
+        return result_lun
+
+    def unmap_vol_from_host(self, volume_name, host_name):
+        """Unmap the volume and delete the host if it has no more mappings.
+
+        If host_name is None the single existing mapping's host is used;
+        with multiple mappings and no host specified, nothing is done.
+        """
+
+        LOG.debug(_('enter: unmap_vol_from_host: volume %(volume_name)s from '
+                    'host %(host_name)s')
+                  % {'volume_name': volume_name, 'host_name': host_name})
+
+        # Check if the mapping exists
+        resp = self.ssh.lsvdiskhostmap(volume_name)
+        if not len(resp):
+            LOG.warning(_('unmap_vol_from_host: No mapping of volume '
+                          '%(vol_name)s to any host found.') %
+                        {'vol_name': volume_name})
+            return
+        if host_name is None:
+            if len(resp) > 1:
+                LOG.warning(_('unmap_vol_from_host: Multiple mappings of '
+                              'volume %(vol_name)s found, no host '
+                              'specified.') % {'vol_name': volume_name})
+                return
+            else:
+                host_name = resp[0]['host_name']
+        else:
+            found = False
+            for h in resp.select('host_name'):
+                if h == host_name:
+                    found = True
+            if not found:
+                # Fixed format spec: was '%(host)' without a conversion
+                # type, which raises ValueError when the message is rendered.
+                LOG.warning(_('unmap_vol_from_host: No mapping of volume '
+                              '%(vol_name)s to host %(host)s found.') %
+                            {'vol_name': volume_name, 'host': host_name})
+
+        # We now know that the mapping exists
+        self.ssh.rmvdiskhostmap(host_name, volume_name)
+
+        # If this host has no more mappings, delete it
+        resp = self.ssh.lshostvdiskmap(host_name)
+        if not len(resp):
+            self.delete_host(host_name)
+
+        LOG.debug(_('leave: unmap_vol_from_host: volume %(volume_name)s from '
+                    'host %(host_name)s')
+                  % {'volume_name': volume_name, 'host_name': host_name})
+
+    @staticmethod
+    def build_default_opts(config):
+        """Build the default vdisk options dict from the driver config."""
+        # Ignore capitalization
+        protocol = config.storwize_svc_connection_protocol
+        if protocol.lower() == 'fc':
+            protocol = 'FC'
+        elif protocol.lower() == 'iscsi':
+            protocol = 'iSCSI'
+
+        opt = {'rsize': config.storwize_svc_vol_rsize,
+               'warning': config.storwize_svc_vol_warning,
+               'autoexpand': config.storwize_svc_vol_autoexpand,
+               'grainsize': config.storwize_svc_vol_grainsize,
+               'compression': config.storwize_svc_vol_compression,
+               'easytier': config.storwize_svc_vol_easytier,
+               'protocol': protocol,
+               'multipath': config.storwize_svc_multipath_enabled,
+               'iogrp': config.storwize_svc_vol_iogrp}
+        return opt
+
+    @staticmethod
+    def check_vdisk_opts(state, opts):
+        """Validate vdisk options against the system 'state'.
+
+        Raises InvalidInput on any illegal or unsupported option value.
+        """
+        # Check that rsize is either -1 or between 0 and 100
+        if not (opts['rsize'] >= -1 and opts['rsize'] <= 100):
+            raise exception.InvalidInput(
+                reason=_('Illegal value specified for storwize_svc_vol_rsize: '
+                         'set to either a percentage (0-100) or -1'))
+
+        # Check that warning is either -1 or between 0 and 100
+        if not (opts['warning'] >= -1 and opts['warning'] <= 100):
+            raise exception.InvalidInput(
+                reason=_('Illegal value specified for '
+                         'storwize_svc_vol_warning: '
+                         'set to a percentage (0-100)'))
+
+        # Check that grainsize is 32/64/128/256
+        if opts['grainsize'] not in [32, 64, 128, 256]:
+            raise exception.InvalidInput(
+                reason=_('Illegal value specified for '
+                         'storwize_svc_vol_grainsize: set to either '
+                         '32, 64, 128, or 256'))
+
+        # Check that compression is supported
+        if opts['compression'] and not state['compression_enabled']:
+            raise exception.InvalidInput(
+                reason=_('System does not support compression'))
+
+        # Check that rsize is set if compression is set
+        if opts['compression'] and opts['rsize'] == -1:
+            raise exception.InvalidInput(
+                reason=_('If compression is set to True, rsize must '
+                         'also be set (not equal to -1)'))
+
+        # Check that the requested protocol is enabled
+        if opts['protocol'] not in state['enabled_protocols']:
+            raise exception.InvalidInput(
+                reason=_('Illegal value %(prot)s specified for '
+                         'storwize_svc_connection_protocol: '
+                         'valid values are %(enabled)s')
+                % {'prot': opts['protocol'],
+                   'enabled': ','.join(state['enabled_protocols'])})
+
+        # Check that the requested I/O group exists on the system
+        if opts['iogrp'] not in state['available_iogrps']:
+            avail_grps = ''.join(str(e) for e in state['available_iogrps'])
+            raise exception.InvalidInput(
+                reason=_('I/O group %(iogrp)d is not valid; available '
+                         'I/O groups are %(avail)s')
+                % {'iogrp': opts['iogrp'],
+                   'avail': avail_grps})
+
+    def get_vdisk_params(self, config, state, type_id, volume_type=None):
+        """Return the parameters for creating the vdisk.
+
+        Takes volume type and defaults from config options into account.
+        Extra-spec values override config defaults; values are coerced to
+        the type of the existing default. The result is validated with
+        check_vdisk_opts before being returned.
+        """
+        opts = self.build_default_opts(config)
+        if volume_type is None and type_id is not None:
+            ctxt = context.get_admin_context()
+            volume_type = volume_types.get_volume_type(ctxt, type_id)
+        if volume_type:
+            specs = dict(volume_type).get('extra_specs')
+            for k, value in specs.iteritems():
+                # Get the scope, if using scope format
+                key_split = k.split(':')
+                if len(key_split) == 1:
+                    scope = None
+                    key = key_split[0]
+                else:
+                    scope = key_split[0]
+                    key = key_split[1]
+
+                # We generally do not look at capabilities in the driver, but
+                # protocol is a special case where the user asks for a given
+                # protocol and we want both the scheduler and the driver to act
+                # on the value.
+                if scope == 'capabilities' and key == 'storage_protocol':
+                    scope = None
+                    key = 'protocol'
+                    words = value.split()
+                    # NOTE(review): a malformed value logs an error but still
+                    # falls through to the del/index below, which can raise
+                    # IndexError -- confirm this is the intended behavior.
+                    if not (words and len(words) == 2 and words[0] == '<in>'):
+                        LOG.error(_('Protocol must be specified as '
+                                    '\'<in> iSCSI\' or \'<in> FC\'.'))
+                    del words[0]
+                    value = words[0]
+
+                # Any keys that the driver should look at should have the
+                # 'drivers' scope.
+                if scope and scope != 'drivers':
+                    continue
+
+                # Coerce the spec string to the default value's type.
+                if key in opts:
+                    this_type = type(opts[key]).__name__
+                    if this_type == 'int':
+                        value = int(value)
+                    elif this_type == 'bool':
+                        value = strutils.bool_from_string(value)
+                    opts[key] = value
+
+        self.check_vdisk_opts(state, opts)
+        return opts
+
+    @staticmethod
+    def _get_vdisk_create_params(opts):
+        """Translate the opts dict into mkvdisk/addvdiskcopy CLI arguments.
+
+        rsize == -1 means a fully-allocated vdisk (no space-efficiency
+        flags); otherwise thin-provisioning flags are emitted, with either
+        compression or an explicit grain size.
+        """
+        easytier = 'on' if opts['easytier'] else 'off'
+
+        if opts['rsize'] == -1:
+            params = []
+        else:
+            params = ['-rsize', '%s%%' % str(opts['rsize']),
+                      '-autoexpand', '-warning',
+                      '%s%%' % str(opts['warning'])]
+            if not opts['autoexpand']:
+                params.remove('-autoexpand')
+
+            if opts['compression']:
+                params.append('-compressed')
+            else:
+                params.extend(['-grainsize', str(opts['grainsize'])])
+
+        params.extend(['-easytier', easytier])
+        return params
+
+    def create_vdisk(self, name, size, units, pool, opts):
+        """Create a vdisk with the given name, size, pool and options."""
+        LOG.debug(_('enter: create_vdisk: vdisk %s ') % name)
+        params = self._get_vdisk_create_params(opts)
+        self.ssh.mkvdisk(name, size, units, pool, opts, params)
+        # Log message fixed to match the actual method name (was
+        # 'leave: _create_vdisk: volume').
+        LOG.debug(_('leave: create_vdisk: vdisk %s ') % name)
+
+    def get_vdisk_attributes(self, vdisk):
+        """Return the vdisk's attributes, or None if it does not exist."""
+        attrs = self.ssh.lsvdisk(vdisk)
+        return attrs
+
+    def is_vdisk_defined(self, vdisk_name):
+        """Check if vdisk is defined.
+
+        Returns True when lsvdisk reports attributes for the vdisk.
+        """
+        attrs = self.get_vdisk_attributes(vdisk_name)
+        return attrs is not None
+
+    def _prepare_fc_map(self, fc_map_id, timeout):
+        """Prestart the FlashCopy mapping and wait until it is 'prepared'.
+
+        Polls the mapping status every 5 seconds up to 'timeout' seconds,
+        re-issuing prestartfcmap if the mapping stops. Raises on an
+        unexpected status or on timeout.
+        """
+        self.ssh.prestartfcmap(fc_map_id)
+        mapping_ready = False
+        wait_time = 5
+        max_retries = (timeout / wait_time) + 1
+        for try_number in range(1, max_retries):
+            mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
+            if (mapping_attrs is None or
+                    'status' not in mapping_attrs):
+                break
+            if mapping_attrs['status'] == 'prepared':
+                mapping_ready = True
+                break
+            elif mapping_attrs['status'] == 'stopped':
+                self.ssh.prestartfcmap(fc_map_id)
+            elif mapping_attrs['status'] != 'preparing':
+                # Message fixed: 'Unexecpted' typo and a missing space
+                # before '%(id)s'.
+                msg = (_('Unexpected mapping status %(status)s for mapping '
+                         '%(id)s. Attributes: %(attr)s')
+                       % {'status': mapping_attrs['status'],
+                          'id': fc_map_id,
+                          'attr': mapping_attrs})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            greenthread.sleep(wait_time)
+
+        if not mapping_ready:
+            # Message fixed: missing space before 'allotted'.
+            msg = (_('Mapping %(id)s prepare failed to complete within the '
+                     'allotted %(to)d seconds timeout. Terminating.')
+                   % {'id': fc_map_id,
+                      'to': timeout})
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def run_flashcopy(self, source, target, timeout, full_copy=True):
+        """Create a FlashCopy mapping from the source to the target.
+
+        Creates the mapping, waits up to 'timeout' seconds for it to become
+        prepared, then starts the copy. full_copy=False creates a
+        snapshot-style mapping (copyrate 0).
+        """
+        LOG.debug(_('enter: run_flashcopy: execute FlashCopy from source '
+                    '%(source)s to target %(target)s') %
+                  {'source': source, 'target': target})
+
+        fc_map_id = self.ssh.mkfcmap(source, target, full_copy)
+        self._prepare_fc_map(fc_map_id, timeout)
+        self.ssh.startfcmap(fc_map_id)
+
+        LOG.debug(_('leave: run_flashcopy: FlashCopy started from '
+                    '%(source)s to %(target)s') %
+                  {'source': source, 'target': target})
+
+    def _get_vdisk_fc_mappings(self, vdisk):
+        """Return FlashCopy mappings that this vdisk is associated with."""
+        mapping_ids = []
+        resp = self.ssh.lsvdiskfcmappings(vdisk)
+        # 'map_id' instead of 'id' to avoid shadowing the builtin id().
+        for map_id in resp.select('id'):
+            mapping_ids.append(map_id)
+        return mapping_ids
+
+    def _get_flashcopy_mapping_attributes(self, fc_map_id):
+        """Return the mapping's attribute dict, or None if it is gone."""
+        resp = self.ssh.lsfcmap(fc_map_id)
+        if not len(resp):
+            return None
+        return resp[0]
+
+    def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
+        """FlashCopy mapping check helper.
+
+        Invoked repeatedly by a FixedIntervalLoopingCall; raises
+        LoopingCallDone (with a boolean retvalue) to stop the loop once no
+        mapping involving this vdisk still needs to be waited on.
+        """
+        LOG.debug(_('Loopcall: _check_vdisk_fc_mappings(), vdisk %s') % name)
+        mapping_ids = self._get_vdisk_fc_mappings(name)
+        wait_for_copy = False
+        for map_id in mapping_ids:
+            attrs = self._get_flashcopy_mapping_attributes(map_id)
+            if not attrs:
+                continue
+            source = attrs['source_vdisk_name']
+            target = attrs['target_vdisk_name']
+            copy_rate = attrs['copy_rate']
+            status = attrs['status']
+
+            # copy_rate '0' identifies a snapshot-style mapping.
+            if copy_rate == '0':
+                if source == name:
+                    # Vdisk with snapshots. Return False if snapshot
+                    # not allowed.
+                    if not allow_snaps:
+                        raise loopingcall.LoopingCallDone(retvalue=False)
+                    # Turn the snapshot into a real copy that auto-deletes.
+                    self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
+                    wait_for_copy = True
+                else:
+                    # A snapshot
+                    if target != name:
+                        msg = (_('Vdisk %(name)s not involved in '
+                                 'mapping %(src)s -> %(tgt)s') %
+                               {'name': name, 'src': source, 'tgt': target})
+                        LOG.error(msg)
+                        raise exception.VolumeDriverException(message=msg)
+                    if status in ['copying', 'prepared']:
+                        self.ssh.stopfcmap(map_id)
+                        # Need to wait for the fcmap to change to
+                        # stopped state before remove fcmap
+                        wait_for_copy = True
+                    elif status in ['stopping', 'preparing']:
+                        wait_for_copy = True
+                    else:
+                        self.ssh.rmfcmap(map_id)
+            # Case 4: Copy in progress - wait and will autodelete
+            else:
+                if status == 'prepared':
+                    self.ssh.stopfcmap(map_id)
+                    self.ssh.rmfcmap(map_id)
+                elif status == 'idle_or_copied':
+                    # Prepare failed
+                    self.ssh.rmfcmap(map_id)
+                else:
+                    wait_for_copy = True
+        if not wait_for_copy or not len(mapping_ids):
+            raise loopingcall.LoopingCallDone(retvalue=True)
+
+    def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True):
+        """Ensure vdisk has no flashcopy mappings.
+
+        Returns the boolean result of the final _check_vdisk_fc_mappings
+        call (False only when a snapshot blocks and allow_snaps is False).
+        """
+        timer = loopingcall.FixedIntervalLoopingCall(
+            self._check_vdisk_fc_mappings, name, allow_snaps)
+        # Create a timer greenthread. The default volume service heart
+        # beat is every 10 seconds. The flashcopy usually takes hours
+        # before it finishes. Don't set the sleep interval shorter
+        # than the heartbeat. Otherwise volume service heartbeat
+        # will not be serviced.
+        # Log message fixed to match the actual method name.
+        LOG.debug(_('Calling ensure_vdisk_no_fc_mappings: vdisk %s')
+                  % name)
+        ret = timer.start(interval=self.check_fcmapping_interval).wait()
+        timer.stop()
+        return ret
+
+    def delete_vdisk(self, vdisk, force):
+        """Ensures that vdisk is not part of FC mapping and deletes it."""
+        LOG.debug(_('enter: delete_vdisk: vdisk %s') % vdisk)
+        if not self.is_vdisk_defined(vdisk):
+            # Spelling fixed in the message ('non-existent').
+            LOG.info(_('Tried to delete non-existent vdisk %s.') % vdisk)
+            return
+        self.ensure_vdisk_no_fc_mappings(vdisk)
+        self.ssh.rmvdisk(vdisk, force=force)
+        LOG.debug(_('leave: delete_vdisk: vdisk %s') % vdisk)
+
+    def create_copy(self, src, tgt, src_id, config, opts, full_copy):
+        """Create a new snapshot using FlashCopy.
+
+        Creates the target vdisk with the source's capacity and starts a
+        FlashCopy from src to tgt; the target is deleted if the copy fails.
+        """
+        LOG.debug(_('enter: create_copy: snapshot %(src)s to %(tgt)s') %
+                  {'tgt': tgt, 'src': src})
+
+        src_attrs = self.get_vdisk_attributes(src)
+        if src_attrs is None:
+            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
+                     'does not exist') % {'src': src, 'src_id': src_id})
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        src_size = src_attrs['capacity']
+        pool = config.storwize_svc_volpool_name
+        self.create_vdisk(tgt, src_size, 'b', pool, opts)
+        timeout = config.storwize_svc_flashcopy_timeout
+        try:
+            self.run_flashcopy(src, tgt, timeout, full_copy=full_copy)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.delete_vdisk(tgt, True)
+
+        # Log message fixed to match the actual method name (was
+        # 'leave: _create_copy').
+        LOG.debug(_('leave: create_copy: snapshot %(tgt)s from '
+                    'vdisk %(src)s') %
+                  {'tgt': tgt, 'src': src})
+
+    def extend_vdisk(self, vdisk, amount):
+        """Grow the vdisk by 'amount' (expandvdisksize uses GB units)."""
+        self.ssh.expandvdisksize(vdisk, amount)
+
+    def migrate_volume_vdiskcopy(self, vdisk, dest_pool, volume_type,
+                                 state, config):
+        """Migrate a volume using addvdiskcopy and rmvdiskcopy.
+
+        This will add a vdisk copy with the given volume type in the given
+        pool, wait until it syncs, and delete the original copy.
+        """
+        this_pool = config.storwize_svc_volpool_name
+        resp = self.ssh.lsvdiskcopy(vdisk)
+        orig_copy_id = None
+        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
+            if mdisk_grp == this_pool:
+                orig_copy_id = copy_id
+
+        if orig_copy_id is None:
+            msg = (_('migrate_volume started without a vdisk copy in the '
+                     'expected pool.'))
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        if volume_type is None:
+            opts = self.get_vdisk_params(config, state, None)
+        else:
+            opts = self.get_vdisk_params(config, state, volume_type['id'],
+                                         volume_type=volume_type)
+        params = self._get_vdisk_create_params(opts)
+        new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params)
+
+        # Poll every 10 seconds until the new copy reports sync == 'yes'.
+        sync = False
+        while not sync:
+            sync = self.ssh.lsvdiskcopy(vdisk, copy_id=new_copy_id)[0]['sync']
+            if sync == 'yes':
+                sync = True
+            else:
+                greenthread.sleep(10)
+
+        self.ssh.rmvdiskcopy(vdisk, orig_copy_id)
+
+    def migrate_vdisk(self, vdisk, dest_pool):
+        """Migrate the vdisk to another pool via migratevdisk."""
+        self.ssh.migratevdisk(vdisk, dest_pool)
+
+    @staticmethod
+    def can_migrate_to_host(host, state):
+        """Return the destination pool if migration to 'host' is possible.
+
+        Returns None when the host's location_info is missing or malformed,
+        or refers to a different driver or storage system.
+        """
+        if 'location_info' not in host['capabilities']:
+            return None
+        info = host['capabilities']['location_info']
+        try:
+            (dest_type, dest_id, dest_pool) = info.split(':')
+        except ValueError:
+            return None
+        if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
+            return None
+        return dest_pool
+
+    def change_vdisk_options(self, vdisk, changes, opts, state):
+        """Apply the option changes listed in 'changes' to an existing vdisk.
+
+        'opts' holds the new values. An I/O group change uses movevdisk and
+        is only attempted on code level 6.4.0.0 or later; all other changes
+        go through chvdisk.
+        """
+        # Convert option values to the string forms the CLI expects.
+        if 'iogrp' in opts:
+            opts['iogrp'] = str(opts['iogrp'])
+        if 'warning' in opts:
+            opts['warning'] = '%s%%' % str(opts['warning'])
+        if 'easytier' in opts:
+            opts['easytier'] = 'on' if opts['easytier'] else 'off'
+        if 'autoexpand' in opts:
+            opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
+
+        if 'iogrp' in changes:
+            changes.remove('iogrp')
+            if state['code_level'] < (6, 4, 0, 0):
+                # Grammar fixed in the message ('lower than', was
+                # 'below then').
+                LOG.debug(_('Ignore change IO group as storage code level '
+                            'is %(code_level)s, lower than '
+                            '6.4.0.0') % {'code_level': state['code_level']})
+            else:
+                self.ssh.movevdisk(vdisk, opts['iogrp'])
+
+        for key in changes:
+            self.ssh.chvdisk(vdisk, ['-' + key, opts[key]])
--- /dev/null
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import re
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import processutils
+
+LOG = logging.getLogger(__name__)
+
+
+class StorwizeSSH(object):
+    """SSH interface to IBM Storwize family and SVC storage systems.
+
+    Most methods are thin wrappers that build an svcinfo/svctask command
+    line, run it over SSH, and parse the '!'-delimited output into a
+    CLIResponse.
+    """
+    def __init__(self, run_ssh):
+        self._ssh = run_ssh
+
+    def _run_ssh(self, ssh_cmd):
+        """Run the command, wrapping failures in VolumeBackendAPIException."""
+        try:
+            return self._ssh(ssh_cmd)
+        except processutils.ProcessExecutionError as e:
+            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
+                     'stdout: %(out)s\n stderr: %(err)s') %
+                   {'cmd': ssh_cmd,
+                    'out': e.stdout,
+                    'err': e.stderr})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
+        """Run an SSH command and return parsed output."""
+        raw = self._run_ssh(ssh_cmd)
+        return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
+                           with_header=with_header)
+
+    def run_ssh_assert_no_output(self, ssh_cmd):
+        """Run an SSH command and assert no output returned."""
+        out, err = self._run_ssh(ssh_cmd)
+        if len(out.strip()) != 0:
+            msg = (_('Expected no output from CLI command %(cmd)s, '
+                     'got %(out)s') % {'cmd': ' '.join(ssh_cmd), 'out': out})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def run_ssh_check_created(self, ssh_cmd):
+        """Run an SSH command and return the ID of the created object."""
+        out, err = self._run_ssh(ssh_cmd)
+        try:
+            match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
+            return match_obj.group(1)
+        except (AttributeError, IndexError):
+            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
+                     'stdout: %(out)s\n stderr: %(err)s') %
+                   {'cmd': ssh_cmd,
+                    'out': out,
+                    'err': err})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def lsnode(self, node_id=None):
+        with_header = True
+        ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
+        if node_id:
+            with_header = False
+            ssh_cmd.append(node_id)
+        return self.run_ssh_info(ssh_cmd, with_header=with_header)
+
+    def lslicense(self):
+        ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
+        return self.run_ssh_info(ssh_cmd)[0]
+
+    def lssystem(self):
+        ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
+        return self.run_ssh_info(ssh_cmd)[0]
+
+    def lsmdiskgrp(self, pool):
+        ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
+        return self.run_ssh_info(ssh_cmd)[0]
+
+    def lsiogrp(self):
+        ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def lsportip(self):
+        ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    @staticmethod
+    def _create_port_arg(port_type, port_name):
+        """Return the CLI argument pair for an iSCSI or FC port."""
+        if port_type == 'initiator':
+            port = ['-iscsiname']
+        else:
+            port = ['-hbawwpn']
+        port.append(port_name)
+        return port
+
+    def mkhost(self, host_name, port_type, port_name):
+        port = self._create_port_arg(port_type, port_name)
+        ssh_cmd = ['svctask', 'mkhost', '-force'] + port + ['-name', host_name]
+        return self.run_ssh_check_created(ssh_cmd)
+
+    def addhostport(self, host, port_type, port_name):
+        port = self._create_port_arg(port_type, port_name)
+        ssh_cmd = ['svctask', 'addhostport', '-force'] + port + [host]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def lshost(self, host=None):
+        with_header = True
+        ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
+        if host:
+            with_header = False
+            ssh_cmd.append(host)
+        return self.run_ssh_info(ssh_cmd, with_header=with_header)
+
+    def add_chap_secret(self, secret, host):
+        ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, host]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def lsiscsiauth(self):
+        ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def lsfabric(self, wwpn=None, host=None):
+        if wwpn:
+            ssh_cmd = ['svcinfo', 'lsfabric', '-wwpn', wwpn, '-delim', '!']
+        elif host:
+            ssh_cmd = ['svcinfo', 'lsfabric', '-host', host]
+        else:
+            msg = (_('Must pass wwpn or host to lsfabric.'))
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
+        """Map vdisk to host.
+
+        If vdisk already mapped and multihostmap is True, use the force flag.
+        """
+        ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host,
+                   '-scsi', lun, vdisk]
+        out, err = self._ssh(ssh_cmd, check_exit_code=False)
+        if 'successfully created' in out:
+            return
+        if not err:
+            msg = (_('Did not find success message nor error for %(fun)s: '
+                     '%(out)s') % {'out': out, 'fun': ssh_cmd})
+            raise exception.VolumeBackendAPIException(data=msg)
+        if err.startswith('CMMVC6071E'):
+            if not multihostmap:
+                LOG.error(_('storwize_svc_multihostmap_enabled is set '
+                            'to False, not allowing multi host mapping.'))
+                # Stray trailing '"' removed from the end of the message.
+                msg = 'CMMVC6071E The VDisk-to-host mapping '\
+                      'was not created because the VDisk is '\
+                      'already mapped to a host.\n'
+                raise exception.VolumeDriverException(message=msg)
+
+        ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
+        return self.run_ssh_check_created(ssh_cmd)
+
+    def rmvdiskhostmap(self, host, vdisk):
+        ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', host, vdisk]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def lsvdiskhostmap(self, vdisk):
+        ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk]
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def lshostvdiskmap(self, host):
+        ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host]
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def rmhost(self, host):
+        ssh_cmd = ['svctask', 'rmhost', host]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def mkvdisk(self, name, size, units, pool, opts, params):
+        ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', pool,
+                   '-iogrp', str(opts['iogrp']), '-size', size, '-unit',
+                   units] + params
+        return self.run_ssh_check_created(ssh_cmd)
+
+    def rmvdisk(self, vdisk, force=True):
+        ssh_cmd = ['svctask', 'rmvdisk']
+        if force:
+            ssh_cmd += ['-force']
+        ssh_cmd += [vdisk]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def lsvdisk(self, vdisk):
+        """Return vdisk attributes or None if it doesn't exist."""
+        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk]
+        out, err = self._ssh(ssh_cmd, check_exit_code=False)
+        if not len(err):
+            return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
+                               with_header=False)[0]
+        # CMMVC5754E: the object does not exist.
+        if err.startswith('CMMVC5754E'):
+            return None
+        msg = (_('CLI Exception output:\n command: %(cmd)s\n '
+                 'stdout: %(out)s\n stderr: %(err)s') %
+               {'cmd': ssh_cmd,
+                'out': out,
+                'err': err})
+        LOG.error(msg)
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def chvdisk(self, vdisk, params):
+        ssh_cmd = ['svctask', 'chvdisk'] + params + [vdisk]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def movevdisk(self, vdisk, iogrp):
+        ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, vdisk]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def expandvdisksize(self, vdisk, amount):
+        ssh_cmd = (['svctask', 'expandvdisksize', '-size', str(amount),
+                    '-unit', 'gb', vdisk])
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def migratevdisk(self, vdisk, dest_pool):
+        ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool,
+                   '-vdisk', vdisk]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def mkfcmap(self, source, target, full_copy):
+        ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target',
+                   target, '-autodelete']
+        if not full_copy:
+            ssh_cmd.extend(['-copyrate', '0'])
+        out, err = self._ssh(ssh_cmd, check_exit_code=False)
+        if 'successfully created' not in out:
+            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
+                     'stdout: %(out)s\n stderr: %(err)s') %
+                   {'cmd': ssh_cmd,
+                    'out': out,
+                    'err': err})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        try:
+            match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
+                                  'successfully created', out)
+            fc_map_id = match_obj.group(1)
+        except (AttributeError, IndexError):
+            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
+                     'stdout: %(out)s\n stderr: %(err)s') %
+                   {'cmd': ssh_cmd,
+                    'out': out,
+                    'err': err})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        return fc_map_id
+
+    def prestartfcmap(self, fc_map_id):
+        ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def startfcmap(self, fc_map_id):
+        ssh_cmd = ['svctask', 'startfcmap', fc_map_id]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
+        ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
+                   '-autodelete', autodel, fc_map_id]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def stopfcmap(self, fc_map_id):
+        ssh_cmd = ['svctask', 'stopfcmap', fc_map_id]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def rmfcmap(self, fc_map_id):
+        ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+    def lsvdiskfcmappings(self, vdisk):
+        ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', vdisk]
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def lsfcmap(self, fc_map_id):
+        ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
+                   'id=%s' % fc_map_id, '-delim', '!']
+        return self.run_ssh_info(ssh_cmd, with_header=True)
+
+    def addvdiskcopy(self, vdisk, dest_pool, params):
+        ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
+                   dest_pool, vdisk])
+        return self.run_ssh_check_created(ssh_cmd)
+
+    def lsvdiskcopy(self, vdisk, copy_id=None):
+        ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
+        with_header = True
+        if copy_id:
+            ssh_cmd += ['-copy', copy_id]
+            with_header = False
+        ssh_cmd += [vdisk]
+        return self.run_ssh_info(ssh_cmd, with_header=with_header)
+
+    def rmvdiskcopy(self, vdisk, copy_id):
+        ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
+
+class CLIResponse(object):
+    '''Parse SVC CLI output and generate iterable.
+
+    'raw' is either a string (stdout) or an (stdout, stderr) pair. With
+    with_header=True the first output row is treated as column names;
+    otherwise blank lines separate per-object key/value sections.
+    '''
+
+    def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
+        super(CLIResponse, self).__init__()
+        if ssh_cmd:
+            self.ssh_cmd = ' '.join(ssh_cmd)
+        else:
+            self.ssh_cmd = 'None'
+        self.raw = raw
+        self.delim = delim
+        self.with_header = with_header
+        self.result = self._parse()
+
+    def select(self, *keys):
+        '''Yield the values of the given keys for each parsed row.
+
+        A key's value may be a scalar or a list (repeated keys); scalars
+        are normalized to one-element lists and the lists are zipped, so
+        multi-valued rows yield one tuple per position.
+        '''
+        for a in self.result:
+            vs = []
+            for k in keys:
+                v = a.get(k, None)
+                if isinstance(v, basestring) or v is None:
+                    v = [v]
+                if isinstance(v, list):
+                    vs.append(v)
+            for item in zip(*vs):
+                if len(item) == 1:
+                    yield item[0]
+                else:
+                    yield item
+
+    def __getitem__(self, key):
+        try:
+            return self.result[key]
+        except KeyError:
+            msg = (_('Did not find expected key %(key)s in %(fun)s: %(raw)s') %
+                   {'key': key, 'fun': self.ssh_cmd, 'raw': self.raw})
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def __iter__(self):
+        for a in self.result:
+            yield a
+
+    def __len__(self):
+        return len(self.result)
+
+    def _parse(self):
+        # Split each non-empty line on the delimiter; an empty line is
+        # yielded as [] and acts as a section separator below.
+        def get_reader(content, delim):
+            for line in content.lstrip().splitlines():
+                line = line.strip()
+                if line:
+                    yield line.split(delim)
+                else:
+                    yield []
+
+        if isinstance(self.raw, basestring):
+            stdout, stderr = self.raw, ''
+        else:
+            stdout, stderr = self.raw
+        reader = get_reader(stdout, self.delim)
+        result = []
+
+        if self.with_header:
+            # First row is the header; each following row maps header
+            # column names to values.
+            hds = tuple()
+            for row in reader:
+                hds = row
+                break
+            for row in reader:
+                cur = dict()
+                if len(hds) != len(row):
+                    msg = (_('Unexpected CLI response: header/row mismatch. '
+                             'header: %(header)s, row: %(row)s')
+                           % {'header': str(hds), 'row': str(row)})
+                    raise exception.VolumeBackendAPIException(data=msg)
+                for k, v in zip(hds, row):
+                    CLIResponse.append_dict(cur, k, v)
+                result.append(cur)
+        else:
+            # key/value sections separated by blank lines; each section
+            # becomes one dict.
+            cur = dict()
+            for row in reader:
+                if row:
+                    CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
+                elif cur:  # start new section
+                    result.append(cur)
+                    cur = dict()
+            if cur:
+                result.append(cur)
+        return result
+
+    @staticmethod
+    def append_dict(dict_, key, value):
+        # A repeated key promotes the stored value to a list of values.
+        key, value = key.strip(), value.strip()
+        obj = dict_.get(key, None)
+        if obj is None:
+            dict_[key] = value
+        elif isinstance(obj, list):
+            obj.append(value)
+            dict_[key] = obj
+        else:
+            dict_[key] = [obj, value]
+        return dict_
+++ /dev/null
-# Copyright 2013 IBM Corp.
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-"""
-Volume driver for IBM Storwize family and SVC storage systems.
-
-Notes:
-1. If you specify both a password and a key file, this driver will use the
- key file only.
-2. When using a key file for authentication, it is up to the user or
- system administrator to store the private key in a safe manner.
-3. The defaults for creating volumes are "-rsize 2% -autoexpand
- -grainsize 256 -warning 0". These can be changed in the configuration
- file or by using volume types(recommended only for advanced users).
-
-Limitations:
-1. The driver expects CLI output in English, error messages may be in a
- localized format.
-2. Clones and creating volumes from snapshots, where the source and target
- are of different sizes, is not supported.
-
-"""
-
-import random
-import re
-import string
-import time
-
-from oslo.config import cfg
-
-from cinder import context
-from cinder import exception
-from cinder.openstack.common import excutils
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import loopingcall
-from cinder.openstack.common import processutils
-from cinder.openstack.common import strutils
-from cinder import units
-from cinder import utils
-from cinder.volume.drivers.san import san
-from cinder.volume import volume_types
-
-LOG = logging.getLogger(__name__)
-
# Configuration options for the Storwize/SVC driver.  They are appended to
# the driver's configuration group in StorwizeSVCDriver.__init__ and
# validated in check_for_setup_error / _check_vdisk_opts.
storwize_svc_opts = [
    cfg.StrOpt('storwize_svc_volpool_name',
               default='volpool',
               help='Storage system storage pool for volumes'),
    cfg.IntOpt('storwize_svc_vol_rsize',
               default=2,
               help='Storage system space-efficiency parameter for volumes '
                    '(percentage)'),
    cfg.IntOpt('storwize_svc_vol_warning',
               default=0,
               help='Storage system threshold for volume capacity warnings '
                    '(percentage)'),
    cfg.BoolOpt('storwize_svc_vol_autoexpand',
                default=True,
                help='Storage system autoexpand parameter for volumes '
                     '(True/False)'),
    cfg.IntOpt('storwize_svc_vol_grainsize',
               default=256,
               help='Storage system grain size parameter for volumes '
                    '(32/64/128/256)'),
    cfg.BoolOpt('storwize_svc_vol_compression',
                default=False,
                help='Storage system compression option for volumes'),
    cfg.BoolOpt('storwize_svc_vol_easytier',
                default=True,
                help='Enable Easy Tier for volumes'),
    cfg.IntOpt('storwize_svc_vol_iogrp',
               default=0,
               help='The I/O group in which to allocate volumes'),
    cfg.IntOpt('storwize_svc_flashcopy_timeout',
               default=120,
               help='Maximum number of seconds to wait for FlashCopy to be '
                    'prepared. Maximum value is 600 seconds (10 minutes)'),
    cfg.StrOpt('storwize_svc_connection_protocol',
               default='iSCSI',
               help='Connection protocol (iSCSI/FC)'),
    cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
                default=True,
                help='Configure CHAP authentication for iSCSI connections '
                     '(Default: Enabled)'),
    cfg.BoolOpt('storwize_svc_multipath_enabled',
                default=False,
                help='Connect with multipath (FC only; iSCSI multipath is '
                     'controlled by Nova)'),
    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
                default=True,
                help='Allows vdisk to multi host mapping'),
]


CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)

# Interval in seconds between FlashCopy mapping state checks.
# NOTE(review): the consumer of this constant is not visible in this chunk;
# presumably a looping call polling fcmap status -- confirm.
CHECK_FCMAPPING_INTERVAL = 300
-
-
-class StorwizeSVCDriver(san.SanDriver):
- """IBM Storwize V7000 and SVC iSCSI/FC volume driver.
-
- Version history:
- 1.0 - Initial driver
- 1.1 - FC support, create_cloned_volume, volume type support,
- get_volume_stats, minor bug fixes
- 1.2.0 - Added retype
- """
-
- """====================================================================="""
- """ SETUP """
- """====================================================================="""
- VERSION = "1.2.0"
-
    def __init__(self, *args, **kwargs):
        """Initialize driver state and host-name sanitization tables.

        All cached system/node information starts empty and is populated
        later by do_setup().

        :param args: positional arguments forwarded to san.SanDriver.
        :param kwargs: keyword arguments forwarded to san.SanDriver.
        """
        super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(storwize_svc_opts)
        # State populated by do_setup().
        self._storage_nodes = {}
        self._enabled_protocols = set()
        self._compression_enabled = False
        self._available_iogrps = []
        self._context = None
        self._system_name = None
        self._system_id = None
        self._extent_size = None
        self._code_level = None

        # Build cleanup translation tables for host names.  Every ASCII
        # character that is not alphanumeric, ' ', '.', '-' or '_' gets
        # mapped to '-' when sanitizing connector host names.
        invalid_ch_in_host = ''
        for num in range(0, 128):
            ch = str(chr(num))
            if (not ch.isalnum() and ch != ' ' and ch != '.'
                    and ch != '-' and ch != '_'):
                invalid_ch_in_host = invalid_ch_in_host + ch
        # Python 2 needs separate tables for str vs unicode translate().
        self._string_host_name_filter = string.maketrans(
            invalid_ch_in_host, '-' * len(invalid_ch_in_host))

        self._unicode_host_name_filter = dict((ord(unicode(char)), u'-')
                                              for char in invalid_ch_in_host)
-
    def _get_iscsi_ip_addrs(self):
        """Populate each storage node's 'ipv4'/'ipv6' lists from lsportip.

        Only ports in state 'configured' or 'online' that belong to a node
        already recorded in self._storage_nodes are considered.
        """
        generator = self._port_conf_generator(['svcinfo', 'lsportip'])
        header = next(generator, None)
        if not header:
            return

        for port_data in generator:
            try:
                port_node_id = port_data['node_id']
                port_ipv4 = port_data['IP_address']
                port_ipv6 = port_data['IP_address_6']
                state = port_data['state']
            except KeyError:
                # NOTE(review): _handle_keyerror presumably raises; if it
                # ever returned, the names above could be unbound below --
                # confirm against its definition.
                self._handle_keyerror('lsportip', header)

            if port_node_id in self._storage_nodes and (
                    state == 'configured' or state == 'online'):
                node = self._storage_nodes[port_node_id]
                if len(port_ipv4):
                    node['ipv4'].append(port_ipv4)
                if len(port_ipv6):
                    node['ipv6'].append(port_ipv6)
-
- def _get_fc_wwpns(self):
- for key in self._storage_nodes:
- node = self._storage_nodes[key]
- ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!', node['id']]
- raw = self._run_ssh(ssh_cmd)
- resp = CLIResponse(raw, delim='!', with_header=False)
- wwpns = set(node['WWPN'])
- for i, s in resp.select('port_id', 'port_status'):
- if 'unconfigured' != s:
- wwpns.add(i)
- node['WWPN'] = list(wwpns)
- LOG.info(_('WWPN on node %(node)s: %(wwpn)s')
- % {'node': node['id'], 'wwpn': node['WWPN']})
-
- def _get_code_level(self, level):
- match_obj = re.search('([0-9].){3}[0-9]', level)
- self._driver_assert(match_obj is not None, _('Get code level failed'))
- code_level = match_obj.group().split('.')
- return tuple(map(int, code_level))
-
- def do_setup(self, ctxt):
- """Check that we have all configuration details from the storage."""
-
- LOG.debug(_('enter: do_setup'))
- self._context = ctxt
-
- # Get storage system name and id
- ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
- attributes = self._execute_command_and_parse_attributes(ssh_cmd)
- if not attributes or not attributes['name']:
- msg = (_('do_setup: Could not get system name'))
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
- self._system_name = attributes['name']
- self._system_id = attributes['id']
- self._code_level = self._get_code_level(attributes['code_level'])
-
- # Validate that the pool exists
- pool = self.configuration.storwize_svc_volpool_name
- attributes = self._get_pool_attrs(pool)
- self._extent_size = attributes['extent_size']
-
- # Check if compression is supported
- self._compression_enabled = False
- try:
- ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
- license_lines = out.strip().split('\n')
- for license_line in license_lines:
- name, foo, value = license_line.partition('!')
- if name in ('license_compression_enclosures',
- 'license_compression_capacity') and value != '0':
- self._compression_enabled = True
- break
- except processutils.ProcessExecutionError:
- LOG.exception(_('Failed to get license information.'))
-
- # Get the available I/O groups
- ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()), 'do_setup',
- ssh_cmd, out, err)
- iogrps = out.strip().split('\n')
- self._assert_ssh_return(len(iogrps), 'do_setup', ssh_cmd, out, err)
- header = iogrps.pop(0)
- for iogrp_line in iogrps:
- try:
- iogrp_data = self._get_hdr_dic(header, iogrp_line, '!')
- if int(iogrp_data['node_count']) > 0:
- self._available_iogrps.append(int(iogrp_data['id']))
- except exception.VolumeBackendAPIException:
- with excutils.save_and_reraise_exception():
- self._log_cli_output_error('do_setup',
- ssh_cmd, out, err)
- except KeyError:
- self._handle_keyerror('lsnode', header)
- except ValueError:
- msg = (_('Expected integer for node_count, '
- 'svcinfo lsiogrp returned: %(node)s') %
- {'node': iogrp_data['node_count']})
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
-
- # Get the iSCSI and FC names of the Storwize/SVC nodes
- ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()), 'do_setup',
- ssh_cmd, out, err)
-
- nodes = out.strip().split('\n')
- self._assert_ssh_return(len(nodes), 'do_setup', ssh_cmd, out, err)
- header = nodes.pop(0)
- for node_line in nodes:
- try:
- node_data = self._get_hdr_dic(header, node_line, '!')
- except exception.VolumeBackendAPIException:
- with excutils.save_and_reraise_exception():
- self._log_cli_output_error('do_setup',
- ssh_cmd, out, err)
- node = {}
- try:
- node['id'] = node_data['id']
- node['name'] = node_data['name']
- node['IO_group'] = node_data['IO_group_id']
- node['iscsi_name'] = node_data['iscsi_name']
- node['WWNN'] = node_data['WWNN']
- node['status'] = node_data['status']
- node['WWPN'] = []
- node['ipv4'] = []
- node['ipv6'] = []
- node['enabled_protocols'] = []
- if node['status'] == 'online':
- self._storage_nodes[node['id']] = node
- except KeyError:
- self._handle_keyerror('lsnode', header)
-
- # Get the iSCSI IP addresses and WWPNs of the Storwize/SVC nodes
- self._get_iscsi_ip_addrs()
- self._get_fc_wwpns()
-
- # For each node, check what connection modes it supports. Delete any
- # nodes that do not support any types (may be partially configured).
- to_delete = []
- for k, node in self._storage_nodes.iteritems():
- if ((len(node['ipv4']) or len(node['ipv6']))
- and len(node['iscsi_name'])):
- node['enabled_protocols'].append('iSCSI')
- self._enabled_protocols.add('iSCSI')
- if len(node['WWPN']):
- node['enabled_protocols'].append('FC')
- self._enabled_protocols.add('FC')
- if not len(node['enabled_protocols']):
- to_delete.append(k)
-
- for delkey in to_delete:
- del self._storage_nodes[delkey]
-
- # Make sure we have at least one node configured
- self._driver_assert(len(self._storage_nodes),
- _('do_setup: No configured nodes'))
-
- LOG.debug(_('leave: do_setup'))
-
- def _build_default_opts(self):
- # Ignore capitalization
- protocol = self.configuration.storwize_svc_connection_protocol
- if protocol.lower() == 'fc':
- protocol = 'FC'
- elif protocol.lower() == 'iscsi':
- protocol = 'iSCSI'
-
- opt = {'rsize': self.configuration.storwize_svc_vol_rsize,
- 'warning': self.configuration.storwize_svc_vol_warning,
- 'autoexpand': self.configuration.storwize_svc_vol_autoexpand,
- 'grainsize': self.configuration.storwize_svc_vol_grainsize,
- 'compression': self.configuration.storwize_svc_vol_compression,
- 'easytier': self.configuration.storwize_svc_vol_easytier,
- 'protocol': protocol,
- 'multipath': self.configuration.storwize_svc_multipath_enabled,
- 'iogrp': self.configuration.storwize_svc_vol_iogrp}
- return opt
-
- def check_for_setup_error(self):
- """Ensure that the flags are set properly."""
- LOG.debug(_('enter: check_for_setup_error'))
-
- # Check that we have the system ID information
- if self._system_name is None:
- exception_msg = (_('Unable to determine system name'))
- raise exception.VolumeBackendAPIException(data=exception_msg)
- if self._system_id is None:
- exception_msg = (_('Unable to determine system id'))
- raise exception.VolumeBackendAPIException(data=exception_msg)
- if self._extent_size is None:
- exception_msg = (_('Unable to determine pool extent size'))
- raise exception.VolumeBackendAPIException(data=exception_msg)
-
- required_flags = ['san_ip', 'san_ssh_port', 'san_login',
- 'storwize_svc_volpool_name']
- for flag in required_flags:
- if not self.configuration.safe_get(flag):
- raise exception.InvalidInput(reason=_('%s is not set') % flag)
-
- # Ensure that either password or keyfile were set
- if not (self.configuration.san_password or
- self.configuration.san_private_key):
- raise exception.InvalidInput(
- reason=_('Password or SSH private key is required for '
- 'authentication: set either san_password or '
- 'san_private_key option'))
-
- # Check that flashcopy_timeout is not more than 10 minutes
- flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout
- if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600):
- raise exception.InvalidInput(
- reason=_('Illegal value %d specified for '
- 'storwize_svc_flashcopy_timeout: '
- 'valid values are between 0 and 600')
- % flashcopy_timeout)
-
- opts = self._build_default_opts()
- self._check_vdisk_opts(opts)
-
- LOG.debug(_('leave: check_for_setup_error'))
-
- """====================================================================="""
- """ INITIALIZE/TERMINATE CONNECTIONS """
- """====================================================================="""
-
- def ensure_export(self, ctxt, volume):
- """Check that the volume exists on the storage.
-
- The system does not "export" volumes as a Linux iSCSI target does,
- and therefore we just check that the volume exists on the storage.
- """
- volume_defined = self._is_vdisk_defined(volume['name'])
- if not volume_defined:
- LOG.error(_('ensure_export: Volume %s not found on storage')
- % volume['name'])
-
- def create_export(self, ctxt, volume):
- model_update = None
- return model_update
-
    def remove_export(self, ctxt, volume):
        """No-op: nothing to clean up since create_export does nothing."""
        pass
-
- def _add_chapsecret_to_host(self, host_name):
- """Generate and store a randomly-generated CHAP secret for the host."""
-
- chap_secret = utils.generate_password()
- ssh_cmd = ['svctask', 'chhost', '-chapsecret', chap_secret, host_name]
- out, err = self._run_ssh(ssh_cmd)
- # No output should be returned from chhost
- self._assert_ssh_return(len(out.strip()) == 0,
- '_add_chapsecret_to_host', ssh_cmd, out, err)
- return chap_secret
-
- def _get_chap_secret_for_host(self, host_name):
- """Return the CHAP secret for the given host."""
-
- LOG.debug(_('enter: _get_chap_secret_for_host: host name %s')
- % host_name)
-
- ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
-
- if not len(out.strip()):
- return None
-
- host_lines = out.strip().split('\n')
- if not len(host_lines):
- return None
-
- header = host_lines.pop(0).split('!')
- self._assert_ssh_return('name' in header, '_get_chap_secret_for_host',
- ssh_cmd, out, err)
- self._assert_ssh_return('iscsi_auth_method' in header,
- '_get_chap_secret_for_host', ssh_cmd, out, err)
- self._assert_ssh_return('iscsi_chap_secret' in header,
- '_get_chap_secret_for_host', ssh_cmd, out, err)
- name_index = header.index('name')
- method_index = header.index('iscsi_auth_method')
- secret_index = header.index('iscsi_chap_secret')
-
- chap_secret = None
- host_found = False
- for line in host_lines:
- info = line.split('!')
- if info[name_index] == host_name:
- host_found = True
- if info[method_index] == 'chap':
- chap_secret = info[secret_index]
-
- self._assert_ssh_return(host_found, '_get_chap_secret_for_host',
- ssh_cmd, out, err)
-
- LOG.debug(_('leave: _get_chap_secret_for_host: host name '
- '%(host_name)s with secret %(chap_secret)s')
- % {'host_name': host_name, 'chap_secret': chap_secret})
-
- return chap_secret
-
    def _connector_to_hostname_prefix(self, connector):
        """Translate connector info to storage system host name.

        Translate a host's name and IP to the prefix of its hostname on the
        storage subsystem.  We create a host name from the host and IP
        address, replacing any invalid characters (at most 55 characters),
        and adding a random 8-character suffix to avoid collisions.  The
        total length should be at most 63 characters.
        """
        host_name = connector['host']
        # Python 2: unicode and str need the different translation tables
        # built in __init__.
        if isinstance(host_name, unicode):
            host_name = host_name.translate(self._unicode_host_name_filter)
        elif isinstance(host_name, str):
            host_name = host_name.translate(self._string_host_name_filter)
        else:
            msg = _('_create_host: Cannot clean host name. Host name '
                    'is not unicode or string')
            LOG.error(msg)
            raise exception.NoValidHost(reason=msg)

        host_name = str(host_name)

        # Storwize family doesn't like hostname that starts with number.
        if not re.match('^[A-Za-z]', host_name):
            host_name = '_' + host_name

        # Truncate to 55 to leave room for the '-XXXXXXXX' random suffix
        # appended by _create_host (63-char limit overall).
        return host_name[:55]
-
- def _find_host_from_wwpn(self, connector):
- for wwpn in connector['wwpns']:
- ssh_cmd = ['svcinfo', 'lsfabric', '-wwpn', wwpn, '-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
-
- if not len(out.strip()):
- # This WWPN is not in use
- continue
-
- host_lines = out.strip().split('\n')
- header = host_lines.pop(0).split('!')
- self._assert_ssh_return('remote_wwpn' in header and
- 'name' in header,
- '_find_host_from_wwpn',
- ssh_cmd, out, err)
- rmt_wwpn_idx = header.index('remote_wwpn')
- name_idx = header.index('name')
-
- wwpns = map(lambda x: x.split('!')[rmt_wwpn_idx], host_lines)
-
- if wwpn in wwpns:
- # All the wwpns will be the mapping for the same
- # host from this WWPN-based query. Just pick
- # the name from first line.
- hostname = host_lines[0].split('!')[name_idx]
- return hostname
-
- # Didn't find a host
- return None
-
- def _find_host_exhaustive(self, connector, hosts):
- for host in hosts:
- ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host]
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()),
- '_find_host_exhaustive',
- ssh_cmd, out, err)
- for attr_line in out.split('\n'):
- # If '!' not found, return the string and two empty strings
- attr_name, foo, attr_val = attr_line.partition('!')
- if (attr_name == 'iscsi_name' and
- 'initiator' in connector and
- attr_val == connector['initiator']):
- return host
- elif (attr_name == 'WWPN' and 'wwpns' in connector and
- attr_val.lower() in
- map(str.lower, map(str, connector['wwpns']))):
- return host
- return None
-
- def _get_host_from_connector(self, connector):
- """List the hosts defined in the storage.
-
- Return the host name with the given connection info, or None if there
- is no host fitting that information.
-
- """
-
- LOG.debug(_('enter: _get_host_from_connector: %s') % str(connector))
-
- # Get list of host in the storage
- ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
-
- if not len(out.strip()):
- return None
-
- # If we have FC information, we have a faster lookup option
- hostname = None
- if 'wwpns' in connector:
- hostname = self._find_host_from_wwpn(connector)
-
- # If we don't have a hostname yet, try the long way
- if not hostname:
- host_lines = out.strip().split('\n')
- self._assert_ssh_return(len(host_lines),
- '_get_host_from_connector',
- ssh_cmd, out, err)
- header = host_lines.pop(0).split('!')
- self._assert_ssh_return('name' in header,
- '_get_host_from_connector',
- ssh_cmd, out, err)
- name_index = header.index('name')
- hosts = map(lambda x: x.split('!')[name_index], host_lines)
- hostname = self._find_host_exhaustive(connector, hosts)
-
- LOG.debug(_('leave: _get_host_from_connector: host %s') % hostname)
-
- return hostname
-
    def _create_host(self, connector):
        """Create a new host on the storage system.

        We create a host name and associate it with the given connection
        information.

        :param connector: connection info dict with 'host' and, optionally,
                          'initiator' and/or 'wwpns'.
        :returns: name of the host object created on the storage.
        """
        LOG.debug(_('enter: _create_host: host %s') % connector['host'])

        # Random 8-digit suffix keeps names unique across recreations.
        rand_id = str(random.randint(0, 99999999)).zfill(8)
        host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector),
                               rand_id)

        # Get all port information from the connector
        ports = []
        if 'initiator' in connector:
            ports.append('-iscsiname %s' % connector['initiator'])
        if 'wwpns' in connector:
            for wwpn in connector['wwpns']:
                ports.append('-hbawwpn %s' % wwpn)

        # When creating a host, we need one port
        self._driver_assert(len(ports), _('_create_host: No connector ports'))
        port1 = ports.pop(0)
        arg_name, arg_val = port1.split()
        ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name',
                   '"%s"' % host_name]
        out, err = self._run_ssh(ssh_cmd)
        self._assert_ssh_return('successfully created' in out,
                                '_create_host', ssh_cmd, out, err)

        # Add any additional ports to the host
        for port in ports:
            arg_name, arg_val = port.split()
            ssh_cmd = ['svctask', 'addhostport', '-force', arg_name, arg_val,
                       host_name]
            # NOTE(review): unlike mkhost above, the output of addhostport
            # is not checked here -- confirm this is intentional.
            out, err = self._run_ssh(ssh_cmd)

        LOG.debug(_('leave: _create_host: host %(host)s - %(host_name)s') %
                  {'host': connector['host'], 'host_name': host_name})
        return host_name
-
- def _get_vdiskhost_mappings(self, vdisk_name):
- """Return the defined storage mappings for a vdisk."""
-
- return_data = {}
- ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name]
- out, err = self._run_ssh(ssh_cmd)
-
- mappings = out.strip().split('\n')
- if len(mappings):
- header = mappings.pop(0)
- for mapping_line in mappings:
- mapping_data = self._get_hdr_dic(header, mapping_line, '!')
- return_data[mapping_data['host_name']] = mapping_data
-
- return return_data
-
- def _get_hostvdisk_mappings(self, host_name):
- """Return the defined storage mappings for a host."""
-
- return_data = {}
- ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name]
- out, err = self._run_ssh(ssh_cmd)
-
- mappings = out.strip().split('\n')
- if len(mappings):
- header = mappings.pop(0)
- for mapping_line in mappings:
- mapping_data = self._get_hdr_dic(header, mapping_line, '!')
- return_data[mapping_data['vdisk_name']] = mapping_data
-
- return return_data
-
- def _map_vol_to_host(self, volume_name, host_name):
- """Create a mapping between a volume to a host."""
-
- LOG.debug(_('enter: _map_vol_to_host: volume %(volume_name)s to '
- 'host %(host_name)s')
- % {'volume_name': volume_name, 'host_name': host_name})
-
- # Check if this volume is already mapped to this host
- mapping_data = self._get_hostvdisk_mappings(host_name)
-
- mapped_flag = False
- result_lun = '-1'
- if volume_name in mapping_data:
- mapped_flag = True
- result_lun = mapping_data[volume_name]['SCSI_id']
- else:
- lun_used = [int(v['SCSI_id']) for v in mapping_data.values()]
- lun_used.sort()
- # Assume all luns are taken to this point, and then try to find
- # an unused one
- result_lun = str(len(lun_used))
- for index, n in enumerate(lun_used):
- if n > index:
- result_lun = str(index)
- break
-
- # Volume is not mapped to host, create a new LUN
- if not mapped_flag:
- ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name,
- '-scsi', result_lun, volume_name]
- out, err = self._run_ssh(ssh_cmd, check_exit_code=False)
- if err and err.startswith('CMMVC6071E'):
- if not self.configuration.storwize_svc_multihostmap_enabled:
- LOG.error(_('storwize_svc_multihostmap_enabled is set '
- 'to False, Not allow multi host mapping'))
- exception_msg = 'CMMVC6071E The VDisk-to-host mapping '\
- 'was not created because the VDisk is '\
- 'already mapped to a host.\n"'
- raise exception.CinderException(data=exception_msg)
-
- for i in range(len(ssh_cmd)):
- if ssh_cmd[i] == 'mkvdiskhostmap':
- ssh_cmd.insert(i + 1, '-force')
-
- # try to map one volume to multiple hosts
- out, err = self._run_ssh(ssh_cmd)
- LOG.warn(_('volume %s mapping to multi host') % volume_name)
- self._assert_ssh_return('successfully created' in out,
- '_map_vol_to_host', ssh_cmd, out, err)
- else:
- self._assert_ssh_return('successfully created' in out,
- '_map_vol_to_host', ssh_cmd, out, err)
- LOG.debug(_('leave: _map_vol_to_host: LUN %(result_lun)s, volume '
- '%(volume_name)s, host %(host_name)s') %
- {'result_lun': result_lun,
- 'volume_name': volume_name,
- 'host_name': host_name})
- return result_lun
-
- def _delete_host(self, host_name):
- """Delete a host on the storage system."""
-
- LOG.debug(_('enter: _delete_host: host %s ') % host_name)
-
- ssh_cmd = ['svctask', 'rmhost', host_name]
- out, err = self._run_ssh(ssh_cmd)
- # No output should be returned from rmhost
- self._assert_ssh_return(len(out.strip()) == 0,
- '_delete_host', ssh_cmd, out, err)
-
- LOG.debug(_('leave: _delete_host: host %s ') % host_name)
-
- def _get_conn_fc_wwpns(self, host_name):
- wwpns = []
- cmd = ['svcinfo', 'lsfabric', '-host', host_name]
- generator = self._port_conf_generator(cmd)
- header = next(generator, None)
- if not header:
- return wwpns
-
- for port_data in generator:
- try:
- wwpns.append(port_data['local_wwpn'])
- except KeyError as e:
- self._handle_keyerror('lsfabric', header)
-
- return wwpns
-
- def validate_connector(self, connector):
- """Check connector for at least one enabled protocol (iSCSI/FC)."""
- valid = False
- if 'iSCSI' in self._enabled_protocols and 'initiator' in connector:
- valid = True
- if 'FC' in self._enabled_protocols and 'wwpns' in connector:
- valid = True
- if not valid:
- err_msg = (_('The connector does not contain the required '
- 'information.'))
- LOG.error(err_msg)
- raise exception.VolumeBackendAPIException(data=err_msg)
-
    def initialize_connection(self, volume, connector):
        """Perform the necessary work so that an iSCSI/FC connection can
        be made.

        To be able to create an iSCSI/FC connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name or WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)

        :param volume: volume dict (uses 'name', 'id', 'volume_type_id').
        :param connector: connector dict from the host attaching the volume.
        :returns: dict with 'driver_volume_type' and 'data' properties.
        """
        LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
                    'connector %(conn)s') % {'vol': str(volume),
                                             'conn': str(connector)})

        vol_opts = self._get_vdisk_params(volume['volume_type_id'])
        host_name = connector['host']
        volume_name = volume['name']

        # Check if a host object is defined for this host name.
        # NOTE(review): this immediately overwrites the connector['host']
        # value assigned above -- the first assignment looks redundant.
        host_name = self._get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._create_host(connector)
            # Verify that create_new_host succeeded
            self._driver_assert(
                host_name is not None,
                _('_create_host failed to return the host name.'))

        if vol_opts['protocol'] == 'iSCSI':
            # Ensure a CHAP secret exists when CHAP is enabled; warn if a
            # stale secret remains after CHAP was disabled.
            chap_secret = self._get_chap_secret_for_host(host_name)
            chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
            if chap_enabled and chap_secret is None:
                chap_secret = self._add_chapsecret_to_host(host_name)
            elif not chap_enabled and chap_secret:
                LOG.warning(_('CHAP secret exists for host but CHAP is '
                              'disabled'))

        volume_attributes = self._get_vdisk_attributes(volume_name)
        lun_id = self._map_vol_to_host(volume_name, host_name)

        self._driver_assert(volume_attributes is not None,
                            _('initialize_connection: Failed to get attributes'
                              ' for volume %s') % volume_name)

        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(_('Did not find expected column name in '
                        'lsvdisk: %s') % str(e))
            exception_msg = (_('initialize_connection: Missing volume '
                               'attribute for volume %s') % volume_name)
            raise exception.VolumeBackendAPIException(data=exception_msg)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for k, node in self._storage_nodes.iteritems():
                if vol_opts['protocol'] not in node['enabled_protocols']:
                    continue
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not len(io_group_nodes):
                exception_msg = (_('initialize_connection: No node found in '
                                   'I/O group %(gid)s for volume %(vol)s') %
                                 {'gid': IO_group, 'vol': volume_name})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)

            if not preferred_node_entry and not vol_opts['multipath']:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warn(_('initialize_connection: Did not find a preferred '
                           'node for volume %s') % volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']
            if vol_opts['protocol'] == 'iSCSI':
                type_str = 'iscsi'
                # Prefer IPv4 addresses; fall back to IPv6.
                if len(preferred_node_entry['ipv4']):
                    ipaddr = preferred_node_entry['ipv4'][0]
                else:
                    ipaddr = preferred_node_entry['ipv6'][0]
                properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
                properties['target_iqn'] = preferred_node_entry['iscsi_name']
                if chap_secret:
                    properties['auth_method'] = 'CHAP'
                    properties['auth_username'] = connector['initiator']
                    properties['auth_password'] = chap_secret
            else:
                type_str = 'fibre_channel'
                conn_wwpns = self._get_conn_fc_wwpns(host_name)
                if len(conn_wwpns) == 0:
                    msg = (_('Could not get FC connection information for the '
                             'host-volume connection. Is the host configured '
                             'properly for FC connections?'))
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)
                if not vol_opts['multipath']:
                    # NOTE(review): preferred_node_entry['WWPN'] is a list
                    # (see do_setup/_get_fc_wwpns), so this membership test
                    # against a list of WWPN strings looks like it can never
                    # be true, always selecting conn_wwpns[0] -- confirm.
                    if preferred_node_entry['WWPN'] in conn_wwpns:
                        properties['target_wwn'] = preferred_node_entry['WWPN']
                    else:
                        properties['target_wwn'] = conn_wwpns[0]
                else:
                    properties['target_wwn'] = conn_wwpns
        except Exception:
            # Undo the host/mapping work before propagating the failure.
            with excutils.save_and_reraise_exception():
                self.terminate_connection(volume, connector)
                LOG.error(_('initialize_connection: Failed to collect return '
                            'properties for volume %(vol)s and connector '
                            '%(conn)s.\n') % {'vol': str(volume),
                                              'conn': str(connector)})

        LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
                    'connector %(conn)s\n properties: %(prop)s')
                  % {'vol': str(volume),
                     'conn': str(connector),
                     'prop': str(properties)})

        return {'driver_volume_type': type_str, 'data': properties, }
-
    def terminate_connection(self, volume, connector, **kwargs):
        """Cleanup after an iSCSI connection has been terminated.

        When we clean up a terminated connection between a given connector
        and volume, we:
        1. Translate the given connector to a host name
        2. Remove the volume-to-host mapping if it exists
        3. Delete the host if it has no more mappings (hosts are created
           automatically by this driver when mappings are created)
        """
        LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
                    'connector %(conn)s') % {'vol': str(volume),
                                             'conn': str(connector)})

        vol_name = volume['name']
        if 'host' in connector:
            host_name = self._get_host_from_connector(connector)
            self._driver_assert(
                host_name is not None,
                _('_get_host_from_connector failed to return the host name '
                  'for connector'))
        else:
            # See bug #1244257 (connector may arrive without host info).
            host_name = None

        # Check if vdisk-host mapping exists, remove if it does. If no host
        # name was given, but only one mapping exists, we can use that.
        mapping_data = self._get_vdiskhost_mappings(vol_name)
        if len(mapping_data) == 0:
            LOG.warning(_('terminate_connection: No mapping of volume '
                          '%(vol_name)s to any host found.') %
                        {'vol_name': vol_name})
            return
        if host_name is None:
            if len(mapping_data) > 1:
                # Ambiguous without a host name; do nothing rather than
                # remove the wrong mapping.
                LOG.warning(_('terminate_connection: Multiple mappings of '
                              'volume %(vol_name)s found, no host '
                              'specified.') % {'vol_name': vol_name})
                return
            else:
                # Python 2 idiom: dict.keys() is a list here; under
                # Python 3 keys()[0] would raise TypeError.
                host_name = mapping_data.keys()[0]
        else:
            if host_name not in mapping_data:
                LOG.error(_('terminate_connection: No mapping of volume '
                            '%(vol_name)s to host %(host_name)s found') %
                          {'vol_name': vol_name, 'host_name': host_name})
                return

        # We have a valid host_name now
        ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', host_name,
                   vol_name]
        out, err = self._run_ssh(ssh_cmd)
        # Verify CLI behaviour - no output is returned from rmvdiskhostmap
        self._assert_ssh_return(len(out.strip()) == 0,
                                'terminate_connection', ssh_cmd, out, err)

        # If this host has no more mappings, delete it
        mapping_data = self._get_hostvdisk_mappings(host_name)
        if not mapping_data:
            self._delete_host(host_name)

        LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
                    'connector %(conn)s') % {'vol': str(volume),
                                             'conn': str(connector)})
-
- """====================================================================="""
- """ VOLUMES/SNAPSHOTS """
- """====================================================================="""
-
- def _get_vdisk_attributes(self, vdisk_name):
- """Return vdisk attributes, or None if vdisk does not exist
-
- Exception is raised if the information from system can not be
- parsed/matched to a single vdisk.
- """
-
- ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_name]
- return self._execute_command_and_parse_attributes(ssh_cmd)
-
- def _get_vdisk_fc_mappings(self, vdisk_name):
- """Return FlashCopy mappings that this vdisk is associated with."""
-
- ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-nohdr', vdisk_name]
- out, err = self._run_ssh(ssh_cmd)
-
- mapping_ids = []
- if (len(out.strip())):
- lines = out.strip().split('\n')
- mapping_ids = [line.split()[0] for line in lines]
- return mapping_ids
-
    def _get_vdisk_params(self, type_id, volume_type=None):
        """Return vdisk options, merging volume-type extra specs on defaults.

        :param type_id: volume type id, looked up when volume_type is None.
        :param volume_type: optional pre-fetched volume type dict.
        :returns: validated option dict (see _build_default_opts).
        """
        opts = self._build_default_opts()
        if volume_type is None and type_id is not None:
            ctxt = context.get_admin_context()
            volume_type = volume_types.get_volume_type(ctxt, type_id)
        if volume_type:
            specs = dict(volume_type).get('extra_specs')
            for k, value in specs.iteritems():
                # Get the scope, if using scope format
                key_split = k.split(':')
                if len(key_split) == 1:
                    scope = None
                    key = key_split[0]
                else:
                    scope = key_split[0]
                    key = key_split[1]

                # We generally do not look at capabilities in the driver, but
                # protocol is a special case where the user asks for a given
                # protocol and we want both the scheduler and the driver to act
                # on the value.
                if scope == 'capabilities' and key == 'storage_protocol':
                    scope = None
                    key = 'protocol'
                    # Expected spec format: '<in> iSCSI' or '<in> FC'.
                    words = value.split()
                    self._driver_assert(words and
                                        len(words) == 2 and
                                        words[0] == '<in>',
                                        _('protocol must be specified as '
                                          '\'<in> iSCSI\' or \'<in> FC\''))
                    del words[0]
                    value = words[0]

                # Any keys that the driver should look at should have the
                # 'drivers' scope.
                if scope and scope != "drivers":
                    continue

                # Coerce the string spec value to the default's type.
                if key in opts:
                    this_type = type(opts[key]).__name__
                    if this_type == 'int':
                        value = int(value)
                    elif this_type == 'bool':
                        value = strutils.bool_from_string(value)
                    opts[key] = value

        self._check_vdisk_opts(opts)
        return opts
-
- def _create_vdisk(self, name, size, units, opts):
- """Create a new vdisk."""
-
- LOG.debug(_('enter: _create_vdisk: vdisk %s ') % name)
-
- model_update = None
- params = self._get_vdisk_create_params(opts)
-
- ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp',
- self.configuration.storwize_svc_volpool_name,
- '-iogrp', str(opts['iogrp']), '-size', size, '-unit',
- units] + params
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()), '_create_vdisk',
- ssh_cmd, out, err)
-
- # Ensure that the output is as expected
- match_obj = re.search('Virtual Disk, id \[([0-9]+)\], '
- 'successfully created', out)
- # Make sure we got a "successfully created" message with vdisk id
- self._driver_assert(
- match_obj is not None,
- _('_create_vdisk %(name)s - did not find '
- 'success message in CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name, 'out': str(out), 'err': str(err)})
-
- LOG.debug(_('leave: _create_vdisk: volume %s ') % name)
-
- def _make_fc_map(self, source, target, full_copy):
- fc_map_cli_cmd = ['svctask', 'mkfcmap', '-source', source, '-target',
- target, '-autodelete']
- if not full_copy:
- fc_map_cli_cmd.extend(['-copyrate', '0'])
- out, err = self._run_ssh(fc_map_cli_cmd)
- self._driver_assert(
- len(out.strip()),
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find success message in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
-
- # Ensure that the output is as expected
- match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], '
- 'successfully created', out)
- # Make sure we got a "successfully created" message with vdisk id
- self._driver_assert(
- match_obj is not None,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find success message in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
-
- try:
- fc_map_id = match_obj.group(1)
- self._driver_assert(
- fc_map_id is not None,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find mapping id in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
- except IndexError:
- self._driver_assert(
- False,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find mapping id in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
- return fc_map_id
-
- def _call_prepare_fc_map(self, fc_map_id, source, target):
- try:
- out, err = self._run_ssh(['svctask', 'prestartfcmap', fc_map_id])
- except processutils.ProcessExecutionError as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('_prepare_fc_map: Failed to prepare FlashCopy '
- 'from %(source)s to %(target)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
-
- def _prepare_fc_map(self, fc_map_id, source, target):
- self._call_prepare_fc_map(fc_map_id, source, target)
- mapping_ready = False
- wait_time = 5
- # Allow waiting of up to timeout (set as parameter)
- timeout = self.configuration.storwize_svc_flashcopy_timeout
- max_retries = (timeout / wait_time) + 1
- for try_number in range(1, max_retries):
- mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
- if (mapping_attrs is None or
- 'status' not in mapping_attrs):
- break
- if mapping_attrs['status'] == 'prepared':
- mapping_ready = True
- break
- elif mapping_attrs['status'] == 'stopped':
- self._call_prepare_fc_map(fc_map_id, source, target)
- elif mapping_attrs['status'] != 'preparing':
- # Unexpected mapping status
- exception_msg = (_('Unexecpted mapping status %(status)s '
- 'for mapping %(id)s. Attributes: '
- '%(attr)s')
- % {'status': mapping_attrs['status'],
- 'id': fc_map_id,
- 'attr': mapping_attrs})
- LOG.error(exception_msg)
- raise exception.VolumeBackendAPIException(data=exception_msg)
- # Need to wait for mapping to be prepared, wait a few seconds
- time.sleep(wait_time)
-
- if not mapping_ready:
- exception_msg = (_('Mapping %(id)s prepare failed to complete '
- 'within the allotted %(to)d seconds timeout. '
- 'Terminating.')
- % {'id': fc_map_id,
- 'to': timeout})
- LOG.error(_('_prepare_fc_map: Failed to start FlashCopy '
- 'from %(source)s to %(target)s with '
- 'exception %(ex)s')
- % {'source': source,
- 'target': target,
- 'ex': exception_msg})
- raise exception.InvalidSnapshot(
- reason=_('_prepare_fc_map: %s') % exception_msg)
-
- def _start_fc_map(self, fc_map_id, source, target):
- try:
- out, err = self._run_ssh(['svctask', 'startfcmap', fc_map_id])
- except processutils.ProcessExecutionError as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('_start_fc_map: Failed to start FlashCopy '
- 'from %(source)s to %(target)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
-
- def _run_flashcopy(self, source, target, full_copy=True):
- """Create a FlashCopy mapping from the source to the target."""
-
- LOG.debug(_('enter: _run_flashcopy: execute FlashCopy from source '
- '%(source)s to target %(target)s') %
- {'source': source, 'target': target})
-
- fc_map_id = self._make_fc_map(source, target, full_copy)
- try:
- self._prepare_fc_map(fc_map_id, source, target)
- self._start_fc_map(fc_map_id, source, target)
- except Exception:
- with excutils.save_and_reraise_exception():
- self._delete_vdisk(target, True)
-
- LOG.debug(_('leave: _run_flashcopy: FlashCopy started from '
- '%(source)s to %(target)s') %
- {'source': source, 'target': target})
-
- def _create_copy(self, src_vdisk, tgt_vdisk, full_copy, opts, src_id,
- from_vol):
- """Create a new snapshot using FlashCopy."""
-
- LOG.debug(_('enter: _create_copy: snapshot %(tgt_vdisk)s from '
- 'vdisk %(src_vdisk)s') %
- {'tgt_vdisk': tgt_vdisk, 'src_vdisk': src_vdisk})
-
- src_vdisk_attributes = self._get_vdisk_attributes(src_vdisk)
- if src_vdisk_attributes is None:
- exception_msg = (
- _('_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) '
- 'does not exist')
- % {'src_vdisk': src_vdisk, 'src_id': src_id})
- LOG.error(exception_msg)
- if from_vol:
- raise exception.VolumeNotFound(volume_id=src_id)
- else:
- raise exception.SnapshotNotFound(snapshot_id=src_id)
-
- self._driver_assert(
- 'capacity' in src_vdisk_attributes,
- _('_create_copy: cannot get source vdisk '
- '%(src)s capacity from vdisk attributes '
- '%(attr)s')
- % {'src': src_vdisk,
- 'attr': src_vdisk_attributes})
-
- src_vdisk_size = src_vdisk_attributes['capacity']
- self._create_vdisk(tgt_vdisk, src_vdisk_size, 'b', opts)
- self._run_flashcopy(src_vdisk, tgt_vdisk, full_copy)
-
- LOG.debug(_('leave: _create_copy: snapshot %(tgt_vdisk)s from '
- 'vdisk %(src_vdisk)s') %
- {'tgt_vdisk': tgt_vdisk, 'src_vdisk': src_vdisk})
-
- def _get_flashcopy_mapping_attributes(self, fc_map_id):
- LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s')
- % fc_map_id)
-
- fc_ls_map_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
- 'id=%s' % fc_map_id, '-delim', '!']
- out, err = self._run_ssh(fc_ls_map_cmd)
- if not len(out.strip()):
- return None
-
- # Get list of FlashCopy mappings
- # We expect zero or one line if mapping does not exist,
- # two lines if it does exist, otherwise error
- lines = out.strip().split('\n')
- self._assert_ssh_return(len(lines) <= 2,
- '_get_flashcopy_mapping_attributes',
- fc_ls_map_cmd, out, err)
-
- if len(lines) == 2:
- attributes = self._get_hdr_dic(lines[0], lines[1], '!')
- else: # 0 or 1 lines
- attributes = None
-
- LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping '
- '%(fc_map_id)s, attributes %(attributes)s') %
- {'fc_map_id': fc_map_id, 'attributes': attributes})
-
- return attributes
-
- def _is_vdisk_defined(self, vdisk_name):
- """Check if vdisk is defined."""
- LOG.debug(_('enter: _is_vdisk_defined: vdisk %s ') % vdisk_name)
- vdisk_attributes = self._get_vdisk_attributes(vdisk_name)
- LOG.debug(_('leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s ')
- % {'vol': vdisk_name,
- 'str': vdisk_attributes is not None})
- if vdisk_attributes is None:
- return False
- else:
- return True
-
- def _ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True):
- """Ensure vdisk has no flashcopy mappings."""
- timer = loopingcall.FixedIntervalLoopingCall(
- self._check_vdisk_fc_mappings, name, allow_snaps)
- # Create a timer greenthread. The default volume service heart
- # beat is every 10 seconds. The flashcopy usually takes hours
- # before it finishes. Don't set the sleep interval shorter
- # than the heartbeat. Otherwise volume service heartbeat
- # will not be serviced.
- LOG.debug(_('Calling _ensure_vdisk_no_fc_mappings: vdisk %s')
- % name)
- ret = timer.start(interval=CHECK_FCMAPPING_INTERVAL).wait()
- timer.stop()
- return ret
-
- def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
- """FlashCopy mapping check helper."""
-
- LOG.debug(_('Loopcall: _check_vdisk_fc_mappings(), vdisk %s') % name)
- mapping_ids = self._get_vdisk_fc_mappings(name)
- wait_for_copy = False
- for map_id in mapping_ids:
- attrs = self._get_flashcopy_mapping_attributes(map_id)
- if not attrs:
- continue
- source = attrs['source_vdisk_name']
- target = attrs['target_vdisk_name']
- copy_rate = attrs['copy_rate']
- status = attrs['status']
-
- if copy_rate == '0':
- # Case #2: A vdisk that has snapshots. Return
- # False if snapshot is not allowed.
- if source == name:
- if not allow_snaps:
- raise loopingcall.LoopingCallDone(retvalue=False)
- ssh_cmd = ['svctask', 'chfcmap', '-copyrate', '50',
- '-autodelete', 'on', map_id]
- out, err = self._run_ssh(ssh_cmd)
- wait_for_copy = True
- # Case #3: A snapshot
- else:
- msg = (_('Vdisk %(name)s not involved in '
- 'mapping %(src)s -> %(tgt)s') %
- {'name': name, 'src': source, 'tgt': target})
- self._driver_assert(target == name, msg)
- if status in ['copying', 'prepared']:
- self._run_ssh(['svctask', 'stopfcmap', map_id])
- # Need to wait for the fcmap to change to
- # stopped state before remove fcmap
- wait_for_copy = True
- elif status in ['stopping', 'preparing']:
- wait_for_copy = True
- else:
- self._run_ssh(['svctask', 'rmfcmap', '-force',
- map_id])
- # Case 4: Copy in progress - wait and will autodelete
- else:
- if status == 'prepared':
- self._run_ssh(['svctask', 'stopfcmap', map_id])
- self._run_ssh(['svctask', 'rmfcmap', '-force', map_id])
- elif status == 'idle_or_copied':
- # Prepare failed
- self._run_ssh(['svctask', 'rmfcmap', '-force', map_id])
- else:
- wait_for_copy = True
- if not wait_for_copy or not len(mapping_ids):
- raise loopingcall.LoopingCallDone(retvalue=True)
-
- def _delete_vdisk(self, name, force):
- """Deletes existing vdisks.
-
- It is very important to properly take care of mappings before deleting
- the disk:
- 1. If no mappings, then it was a vdisk, and can be deleted
- 2. If it is the source of a flashcopy mapping and copy_rate is 0, then
- it is a vdisk that has a snapshot. If the force flag is set,
- delete the mapping and the vdisk, otherwise set the mapping to
- copy and wait (this will allow users to delete vdisks that have
- snapshots if/when the upper layers allow it).
- 3. If it is the target of a mapping and copy_rate is 0, it is a
- snapshot, and we should properly stop the mapping and delete.
- 4. If it is the source/target of a mapping and copy_rate is not 0, it
- is a clone or vdisk created from a snapshot. We wait for the copy
- to complete (the mapping will be autodeleted) and then delete the
- vdisk.
-
- """
-
- LOG.debug(_('enter: _delete_vdisk: vdisk %s') % name)
-
- # Try to delete volume only if found on the storage
- vdisk_defined = self._is_vdisk_defined(name)
- if not vdisk_defined:
- LOG.info(_('warning: Tried to delete vdisk %s but it does not '
- 'exist.') % name)
- return
-
- self._ensure_vdisk_no_fc_mappings(name)
-
- ssh_cmd = ['svctask', 'rmvdisk', '-force', name]
- if not force:
- ssh_cmd.remove('-force')
- out, err = self._run_ssh(ssh_cmd)
- # No output should be returned from rmvdisk
- self._assert_ssh_return(len(out.strip()) == 0,
- ('_delete_vdisk %(name)s')
- % {'name': name},
- ssh_cmd, out, err)
- LOG.debug(_('leave: _delete_vdisk: vdisk %s') % name)
-
- def create_volume(self, volume):
- opts = self._get_vdisk_params(volume['volume_type_id'])
- return self._create_vdisk(volume['name'], str(volume['size']), 'gb',
- opts)
-
- def delete_volume(self, volume):
- self._delete_vdisk(volume['name'], False)
-
- def create_snapshot(self, snapshot):
- source_vol = self.db.volume_get(self._context, snapshot['volume_id'])
- opts = self._get_vdisk_params(source_vol['volume_type_id'])
- self._create_copy(src_vdisk=snapshot['volume_name'],
- tgt_vdisk=snapshot['name'],
- full_copy=False,
- opts=opts,
- src_id=snapshot['volume_id'],
- from_vol=True)
-
- def delete_snapshot(self, snapshot):
- self._delete_vdisk(snapshot['name'], False)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- if volume['size'] != snapshot['volume_size']:
- exception_message = (_('create_volume_from_snapshot: '
- 'Source and destination size differ.'))
- LOG.error(exception_message)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- opts = self._get_vdisk_params(volume['volume_type_id'])
- self._create_copy(src_vdisk=snapshot['name'],
- tgt_vdisk=volume['name'],
- full_copy=True,
- opts=opts,
- src_id=snapshot['id'],
- from_vol=False)
-
- def create_cloned_volume(self, tgt_volume, src_volume):
- if src_volume['size'] != tgt_volume['size']:
- exception_message = (_('create_cloned_volume: '
- 'Source and destination size differ.'))
- LOG.error(exception_message)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- opts = self._get_vdisk_params(tgt_volume['volume_type_id'])
- self._create_copy(src_vdisk=src_volume['name'],
- tgt_vdisk=tgt_volume['name'],
- full_copy=True,
- opts=opts,
- src_id=src_volume['id'],
- from_vol=True)
-
- def extend_volume(self, volume, new_size):
- LOG.debug(_('enter: extend_volume: volume %s') % volume['id'])
- ret = self._ensure_vdisk_no_fc_mappings(volume['name'],
- allow_snaps=False)
- if not ret:
- exception_message = (_('extend_volume: Extending a volume with '
- 'snapshots is not supported.'))
- LOG.error(exception_message)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- extend_amt = int(new_size) - volume['size']
- ssh_cmd = (['svctask', 'expandvdisksize', '-size', str(extend_amt),
- '-unit', 'gb', volume['name']])
- out, err = self._run_ssh(ssh_cmd)
- # No output should be returned from expandvdisksize
- self._assert_ssh_return(len(out.strip()) == 0, 'extend_volume',
- ssh_cmd, out, err)
- LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])
-
- def _add_vdisk_copy(self, volume, dest_pool, opts):
- """Create a vdisk copy for the given volume."""
- params = self._get_vdisk_create_params(opts)
- ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
- dest_pool, volume['name']])
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()), '_add_vdisk_copy',
- ssh_cmd, out, err)
-
- # Ensure that the output is as expected
- match_obj = re.search('Vdisk \[([0-9]+)\] copy \[([0-9]+)\] '
- 'successfully created', out)
- # Make sure we got a "successfully created" message with copy id
- self._driver_assert(
- match_obj is not None,
- _('_add_vdisk_copy %(name)s - did not find '
- 'success message in CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'name': volume['name'], 'out': str(out), 'err': str(err)})
-
- copy_id = match_obj.group(2)
- return copy_id
-
- def _get_vdisk_copy_attrs(self, volume, copy_id):
- ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!', '-copy',
- copy_id, volume['name']]
- attrs = self._execute_command_and_parse_attributes(ssh_cmd)
- if not attrs:
- msg = (_('_get_vdisk_copy_attrs: Could not get vdisk '
- 'copy data'))
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
- return attrs
-
- def _wait_for_vdisk_copy_sync(self, volume, copy_id):
- sync = False
- while not sync:
- attrs = self._get_vdisk_copy_attrs(volume, copy_id)
- if attrs['sync'] == 'yes':
- sync = True
- else:
- time.sleep(5)
-
- def _remove_vdisk_copy(self, volume, copy_id):
- ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id,
- volume['name']]
- out, err = self._run_ssh(ssh_cmd)
- # No output should be returned from rmvdiskcopy
- self._assert_ssh_return(len(out.strip()) == 0, '_remove_vdisk_copy',
- ssh_cmd, out, err)
-
- def _migrate_volume_vdiskcopy(self, volume, dest_pool, volume_type):
- """Migrate a volume using addvdiskcopy and rmvdiskcopy.
-
- This will add a vdisk copy with the given volume type in the given
- pool, wait until it syncs, and delete the original copy.
- """
- this_pool = self.configuration.storwize_svc_volpool_name
- orig_copy_id = self._find_vdisk_copy_id(this_pool, volume['name'])
- self._driver_assert(orig_copy_id is not None,
- _('migrate_volume started without a vdisk '
- 'copy in the expected pool.'))
-
- if volume_type is None:
- opts = self._get_vdisk_params(None)
- else:
- opts = self._get_vdisk_params(volume_type['id'],
- volume_type=volume_type)
- new_copy_id = self._add_vdisk_copy(volume, dest_pool, opts)
- self._wait_for_vdisk_copy_sync(volume, new_copy_id)
- self._remove_vdisk_copy(volume, orig_copy_id)
-
- def _same_host(self, host):
- dest_location = host['capabilities'].get('location_info')
- if self._stats['location_info'] == dest_location:
- return True
- return False
-
- def _can_migrate_to_host(self, host):
- if 'location_info' not in host['capabilities']:
- return None
- info = host['capabilities']['location_info']
- try:
- (dest_type, dest_id, dest_pool) = info.split(':')
- except ValueError:
- return None
- if (dest_type != 'StorwizeSVCDriver' or dest_id != self._system_id):
- return None
- return dest_pool
-
- @staticmethod
- def _convert_vdisk_parameters(opts):
- if 'iogrp' in opts:
- opts['iogrp'] = str(opts['iogrp'])
- if 'warning' in opts:
- opts['warning'] = '%s%%' % str(opts['warning'])
- if 'easytier' in opts:
- opts['easytier'] = 'on' if opts['easytier'] else 'off'
- if 'autoexpand' in opts:
- opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
- return opts
-
- def migrate_volume(self, ctxt, volume, host):
- """Migrate directly if source and dest are managed by same storage.
-
- The method uses the migratevdisk method, which returns almost
- immediately, if the source and target pools have the same extent_size.
- Otherwise, it uses addvdiskcopy and rmvdiskcopy, which require waiting
- for the copy operation to complete.
-
- :param ctxt: Context
- :param volume: A dictionary describing the volume to migrate
- :param host: A dictionary describing the host to migrate to, where
- host['host'] is its name, and host['capabilities'] is a
- dictionary of its reported capabilities.
- """
- LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s') %
- {'id': volume['id'], 'host': host['host']})
-
- false_ret = (False, None)
- dest_pool = self._can_migrate_to_host(host)
- if dest_pool is None:
- return false_ret
-
- if 'extent_size' not in host['capabilities']:
- return false_ret
- if host['capabilities']['extent_size'] == self._extent_size:
- # If source and dest pools have the same extent size, migratevdisk
- ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool,
- '-vdisk', volume['name']]
- out, err = self._run_ssh(ssh_cmd)
- # No output should be returned from migratevdisk
- self._assert_ssh_return(len(out.strip()) == 0, 'migrate_volume',
- ssh_cmd, out, err)
- else:
- # If source and dest pool extent size differ, add/delete vdisk copy
-
- ctxt = context.get_admin_context()
- if volume['volume_type_id'] is not None:
- volume_type_id = volume['volume_type_id']
- vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
- else:
- vol_type = None
- self._migrate_volume_vdiskcopy(volume, dest_pool, vol_type)
-
- LOG.debug(_('leave: migrate_volume: id=%(id)s, host=%(host)s') %
- {'id': volume['id'], 'host': host['host']})
- return (True, None)
-
- def retype(self, ctxt, volume, new_type, diff, host):
- """Convert the volume to be of the new type.
-
- Returns a boolean indicating whether the retype occurred.
-
- :param ctxt: Context
- :param volume: A dictionary describing the volume to migrate
- :param new_type: A dictionary describing the volume type to convert to
- :param diff: A dictionary with the difference between the two types
- :param host: A dictionary describing the host to migrate to, where
- host['host'] is its name, and host['capabilities'] is a
- dictionary of its reported capabilities.
- """
- LOG.debug(_('enter: retype: id=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host})
-
- ignore_keys = ['protocol', 'multipath']
- no_copy_keys = ['warning', 'autoexpand', 'easytier', 'iogrp']
- copy_keys = ['rsize', 'grainsize', 'compression']
- all_keys = ignore_keys + no_copy_keys + copy_keys
- old_opts = self._get_vdisk_params(volume['volume_type_id'])
- new_opts = self._get_vdisk_params(new_type['id'],
- volume_type=new_type)
-
- vdisk_changes = []
- need_copy = False
- for key in all_keys:
- if old_opts[key] != new_opts[key]:
- if key in copy_keys:
- need_copy = True
- break
- elif key in no_copy_keys:
- vdisk_changes.append(key)
-
- if not self._same_host(host):
- need_copy = True
-
- if need_copy:
- dest_pool = self._can_migrate_to_host(host)
- if dest_pool is None:
- return False
-
- self._migrate_volume_vdiskcopy(volume, dest_pool, new_type)
- else:
- new_opts = self._convert_vdisk_parameters(new_opts)
- if 'iogrp' in vdisk_changes:
- vdisk_changes.remove('iogrp')
- if self._code_level < (6, 4, 0, 0):
- LOG.debug(_('Ignore change IO group as storage code level '
- 'is %(code_level)s, below then '
- '6.4.0.0') % {'code_level': self._code_level})
- else:
- ssh_cmd = (['svctask', 'movevdisk', '-iogrp',
- new_opts['iogrp']] + [volume['name']])
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()) == 0,
- 'movevdisk', ssh_cmd, out, err)
- params = []
- for key in vdisk_changes:
- params.extend(['-' + key, new_opts[key]])
- if params:
- ssh_cmd = (['svctask', 'chvdisk'] + params + [volume['name']])
- out, err = self._run_ssh(ssh_cmd)
- self._assert_ssh_return(len(out.strip()) == 0,
- 'retype', ssh_cmd, out, err)
-
- LOG.debug(_('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host['host']})
- return True
-
- def report_volume_migrate_status(self):
- pass
-
- """====================================================================="""
- """ MISC/HELPERS """
- """====================================================================="""
-
- def get_volume_stats(self, refresh=False):
- """Get volume stats.
-
- If we haven't gotten stats yet or 'refresh' is True,
- run update the stats first.
- """
- if not self._stats or refresh:
- self._update_volume_stats()
-
- return self._stats
-
- def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
-
- LOG.debug(_("Updating volume stats"))
- data = {}
-
- data['vendor_name'] = 'IBM'
- data['driver_version'] = self.VERSION
- data['storage_protocol'] = list(self._enabled_protocols)
-
- data['total_capacity_gb'] = 0 # To be overwritten
- data['free_capacity_gb'] = 0 # To be overwritten
- data['reserved_percentage'] = self.configuration.reserved_percentage
- data['QoS_support'] = False
-
- pool = self.configuration.storwize_svc_volpool_name
- backend_name = self.configuration.safe_get('volume_backend_name')
- if not backend_name:
- backend_name = '%s_%s' % (self._system_name, pool)
- data['volume_backend_name'] = backend_name
-
- ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
- attributes = self._execute_command_and_parse_attributes(ssh_cmd)
- if not attributes:
- LOG.error(_('Could not get pool data from the storage'))
- exception_message = (_('_update_volume_stats: '
- 'Could not get storage pool data'))
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- data['total_capacity_gb'] = (float(attributes['capacity']) /
- units.GiB)
- data['free_capacity_gb'] = (float(attributes['free_capacity']) /
- units.GiB)
- data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
- data['compression_support'] = self._compression_enabled
- data['extent_size'] = self._extent_size
- data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
- {'sys_id': self._system_id,
- 'pool': pool})
-
- self._stats = data
-
- def _get_pool_attrs(self, pool):
- ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
- attributes = self._execute_command_and_parse_attributes(ssh_cmd)
- if not attributes:
- msg = (_('_get_pool_attrs: Pool %s does not exist') % pool)
- LOG.error(msg)
- raise exception.InvalidInput(reason=msg)
- return attributes
-
- def _find_vdisk_copy_id(self, pool, name):
- copies_info = self._get_vdisk_copy_info(name)
- for cid, cinfo in copies_info.iteritems():
- if cinfo['mdisk_grp_name'] == pool:
- return cid
- return None
-
- def _port_conf_generator(self, cmd):
- ssh_cmd = cmd + ['-delim', '!']
- out, err = self._run_ssh(ssh_cmd)
-
- if not len(out.strip()):
- return
- port_lines = out.strip().split('\n')
- if not len(port_lines):
- return
-
- header = port_lines.pop(0)
- yield header
- for portip_line in port_lines:
- try:
- port_data = self._get_hdr_dic(header, portip_line, '!')
- except exception.VolumeBackendAPIException:
- with excutils.save_and_reraise_exception():
- self._log_cli_output_error('_port_conf_generator',
- ssh_cmd, out, err)
- yield port_data
-
- def _get_vdisk_copy_info(self, vdisk):
- ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!', vdisk]
- out, err = self._run_ssh(ssh_cmd)
-
- self._assert_ssh_return(len(out.strip()), '_get_vdisk_copy_info',
- ssh_cmd, out, err)
- copy_lines = out.strip().split('\n')
- self._assert_ssh_return(len(copy_lines), '_get_vdisk_copy_info',
- ssh_cmd, out, err)
-
- header = copy_lines.pop(0)
- ret = {}
- for copy_line in copy_lines:
- try:
- copy_data = self._get_hdr_dic(header, copy_line, '!')
- except exception.VolumeBackendAPIException:
- with excutils.save_and_reraise_exception():
- self._log_cli_output_error('_get_vdisk_copy_info',
- ssh_cmd, out, err)
- ret[copy_data['copy_id']] = copy_data
- return ret
-
- @staticmethod
- def _get_vdisk_create_params(opts):
- easytier = 'on' if opts['easytier'] else 'off'
-
- # Set space-efficient options
- if opts['rsize'] == -1:
- params = []
- else:
- params = ['-rsize', '%s%%' % str(opts['rsize']),
- '-autoexpand', '-warning',
- '%s%%' % str(opts['warning'])]
- if not opts['autoexpand']:
- params.remove('-autoexpand')
-
- if opts['compression']:
- params.append('-compressed')
- else:
- params.extend(['-grainsize', str(opts['grainsize'])])
-
- params.extend(['-easytier', easytier])
- return params
-
- def _check_vdisk_opts(self, opts):
- # Check that rsize is either -1 or between 0 and 100
- if not (opts['rsize'] >= -1 and opts['rsize'] <= 100):
- raise exception.InvalidInput(
- reason=_('Illegal value specified for storwize_svc_vol_rsize: '
- 'set to either a percentage (0-100) or -1'))
-
- # Check that warning is either -1 or between 0 and 100
- if not (opts['warning'] >= -1 and opts['warning'] <= 100):
- raise exception.InvalidInput(
- reason=_('Illegal value specified for '
- 'storwize_svc_vol_warning: '
- 'set to a percentage (0-100)'))
-
- # Check that grainsize is 32/64/128/256
- if opts['grainsize'] not in [32, 64, 128, 256]:
- raise exception.InvalidInput(
- reason=_('Illegal value specified for '
- 'storwize_svc_vol_grainsize: set to either '
- '32, 64, 128, or 256'))
-
- # Check that compression is supported
- if opts['compression'] and not self._compression_enabled:
- raise exception.InvalidInput(
- reason=_('System does not support compression'))
-
- # Check that rsize is set if compression is set
- if opts['compression'] and opts['rsize'] == -1:
- raise exception.InvalidInput(
- reason=_('If compression is set to True, rsize must '
- 'also be set (not equal to -1)'))
-
- # Check that the requested protocol is enabled
- if opts['protocol'] not in self._enabled_protocols:
- raise exception.InvalidInput(
- reason=_('Illegal value %(prot)s specified for '
- 'storwize_svc_connection_protocol: '
- 'valid values are %(enabled)s')
- % {'prot': opts['protocol'],
- 'enabled': ','.join(self._enabled_protocols)})
-
- if opts['iogrp'] not in self._available_iogrps:
- raise exception.InvalidInput(
- reason=_('I/O group %(iogrp)d is not valid; available '
- 'I/O groups are %(avail)s')
- % {'iogrp': opts['iogrp'],
- 'avail': ''.join(str(e) for e in self._available_iogrps)})
-
- def _execute_command_and_parse_attributes(self, ssh_cmd):
- """Execute command on the Storwize/SVC and parse attributes.
-
- Exception is raised if the information from the system
- can not be obtained.
-
- """
-
- LOG.debug(_('enter: _execute_command_and_parse_attributes: '
- ' command %s') % str(ssh_cmd))
-
- try:
- out, err = self._run_ssh(ssh_cmd)
- except processutils.ProcessExecutionError as e:
- # Didn't get details from the storage, return None
- LOG.error(_('CLI Exception output:\n command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s') %
- {'cmd': ssh_cmd,
- 'out': e.stdout,
- 'err': e.stderr})
- return None
-
- self._assert_ssh_return(len(out),
- '_execute_command_and_parse_attributes',
- ssh_cmd, out, err)
- attributes = {}
- for attrib_line in out.split('\n'):
- # If '!' not found, return the string and two empty strings
- attrib_name, foo, attrib_value = attrib_line.partition('!')
- if attrib_name is not None and len(attrib_name.strip()):
- attributes[attrib_name] = attrib_value
-
- LOG.debug(_('leave: _execute_command_and_parse_attributes:\n'
- 'command: %(cmd)s\n'
- 'attributes: %(attr)s')
- % {'cmd': str(ssh_cmd),
- 'attr': str(attributes)})
-
- return attributes
-
- def _get_hdr_dic(self, header, row, delim):
- """Return CLI row data as a dictionary indexed by names from header.
- string. The strings are converted to columns using the delimiter in
- delim.
- """
-
- attributes = header.split(delim)
- values = row.split(delim)
- self._driver_assert(
- len(values) ==
- len(attributes),
- _('_get_hdr_dic: attribute headers and values do not match.\n '
- 'Headers: %(header)s\n Values: %(row)s')
- % {'header': str(header),
- 'row': str(row)})
- dic = dict((a, v) for a, v in map(None, attributes, values))
- return dic
-
- def _log_cli_output_error(self, function, cmd, out, err):
- LOG.error(_('%(fun)s: Failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n')
- % {'fun': function, 'cmd': cmd,
- 'out': str(out), 'err': str(err)})
-
- def _driver_assert(self, assert_condition, exception_message):
- """Internal assertion mechanism for CLI output."""
- if not assert_condition:
- LOG.error(exception_message)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- def _assert_ssh_return(self, test, fun, ssh_cmd, out, err):
- self._driver_assert(
- test,
- _('%(fun)s: Failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'fun': fun,
- 'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
-
- def _handle_keyerror(self, function, header):
- msg = (_('Did not find expected column in %(fun)s: %(hdr)s') %
- {'fun': function, 'hdr': header})
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(
- data=msg)
-
-
-class CLIResponse(object):
- '''Parse SVC CLI output and generate iterable.'''
-
- def __init__(self, raw, delim='!', with_header=True):
- super(CLIResponse, self).__init__()
- self.raw = raw
- self.delim = delim
- self.with_header = with_header
- self.result = self._parse()
-
- def select(self, *keys):
- for a in self.result:
- vs = []
- for k in keys:
- v = a.get(k, None)
- if isinstance(v, basestring):
- v = [v]
- if isinstance(v, list):
- vs.append(v)
- for item in zip(*vs):
- yield item
-
- def __getitem__(self, key):
- return self.result[key]
-
- def __iter__(self):
- for a in self.result:
- yield a
-
- def __len__(self):
- return len(self.result)
-
- def _parse(self):
- def get_reader(content, delim):
- for line in content.lstrip().splitlines():
- line = line.strip()
- if line:
- yield line.split(delim)
- else:
- yield []
-
- if isinstance(self.raw, basestring):
- stdout, stderr = self.raw, ''
- else:
- stdout, stderr = self.raw
- reader = get_reader(stdout, self.delim)
- result = []
-
- if self.with_header:
- hds = tuple()
- for row in reader:
- hds = row
- break
- for row in reader:
- cur = dict()
- for k, v in zip(hds, row):
- CLIResponse.append_dict(cur, k, v)
- result.append(cur)
- else:
- cur = dict()
- for row in reader:
- if row:
- CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
- elif cur: # start new section
- result.append(cur)
- cur = dict()
- if cur:
- result.append(cur)
- return result
-
- @staticmethod
- def append_dict(dict_, key, value):
- key, value = key.strip(), value.strip()
- obj = dict_.get(key, None)
- if obj is None:
- dict_[key] = value
- elif isinstance(obj, list):
- obj.append(value)
- dict_[key] = obj
- else:
- dict_[key] = [obj, value]
- return dict_
'cinder.volume.drivers.solidfire.SolidFire':
'cinder.volume.drivers.solidfire.SolidFireDriver',
'cinder.volume.storwize_svc.StorwizeSVCDriver':
- 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver',
+ 'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver',
+ 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver':
+ 'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver',
'cinder.volume.windows.WindowsDriver':
'cinder.volume.drivers.windows.windows.WindowsDriver',
'cinder.volume.drivers.windows.WindowsDriver':
#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml
+#
+# Options defined in cinder.volume.drivers.ibm.storwize_svc
+#
+
+# Storage system storage pool for volumes (string value)
+#storwize_svc_volpool_name=volpool
+
+# Storage system space-efficiency parameter for volumes
+# (percentage) (integer value)
+#storwize_svc_vol_rsize=2
+
+# Storage system threshold for volume capacity warnings
+# (percentage) (integer value)
+#storwize_svc_vol_warning=0
+
+# Storage system autoexpand parameter for volumes (True/False)
+# (boolean value)
+#storwize_svc_vol_autoexpand=true
+
+# Storage system grain size parameter for volumes
+# (32/64/128/256) (integer value)
+#storwize_svc_vol_grainsize=256
+
+# Storage system compression option for volumes (boolean
+# value)
+#storwize_svc_vol_compression=false
+
+# Enable Easy Tier for volumes (boolean value)
+#storwize_svc_vol_easytier=true
+
+# The I/O group in which to allocate volumes (integer value)
+#storwize_svc_vol_iogrp=0
+
+# Maximum number of seconds to wait for FlashCopy to be
+# prepared. Maximum value is 600 seconds (10 minutes) (integer
+# value)
+#storwize_svc_flashcopy_timeout=120
+
+# Connection protocol (iSCSI/FC) (string value)
+#storwize_svc_connection_protocol=iSCSI
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Enabled) (boolean value)
+#storwize_svc_iscsi_chap_enabled=true
+
+# Connect with multipath (FC only; iSCSI multipath is
+# controlled by Nova) (boolean value)
+#storwize_svc_multipath_enabled=false
+
+# Allows vdisk to multi host mapping (boolean value)
+#storwize_svc_multihostmap_enabled=true
+
+
#
# Options defined in cinder.volume.drivers.lvm
#
#sf_api_port=443
-#
-# Options defined in cinder.volume.drivers.storwize_svc
-#
-
-# Storage system storage pool for volumes (string value)
-#storwize_svc_volpool_name=volpool
-
-# Storage system space-efficiency parameter for volumes
-# (percentage) (integer value)
-#storwize_svc_vol_rsize=2
-
-# Storage system threshold for volume capacity warnings
-# (percentage) (integer value)
-#storwize_svc_vol_warning=0
-
-# Storage system autoexpand parameter for volumes (True/False)
-# (boolean value)
-#storwize_svc_vol_autoexpand=true
-
-# Storage system grain size parameter for volumes
-# (32/64/128/256) (integer value)
-#storwize_svc_vol_grainsize=256
-
-# Storage system compression option for volumes (boolean
-# value)
-#storwize_svc_vol_compression=false
-
-# Enable Easy Tier for volumes (boolean value)
-#storwize_svc_vol_easytier=true
-
-# The I/O group in which to allocate volumes (integer value)
-#storwize_svc_vol_iogrp=0
-
-# Maximum number of seconds to wait for FlashCopy to be
-# prepared. Maximum value is 600 seconds (10 minutes) (integer
-# value)
-#storwize_svc_flashcopy_timeout=120
-
-# Connection protocol (iSCSI/FC) (string value)
-#storwize_svc_connection_protocol=iSCSI
-
-# Configure CHAP authentication for iSCSI connections
-# (Default: Enabled) (boolean value)
-#storwize_svc_iscsi_chap_enabled=true
-
-# Connect with multipath (FC only; iSCSI multipath is
-# controlled by Nova) (boolean value)
-#storwize_svc_multipath_enabled=false
-
-# Allows vdisk to multi host mapping (boolean value)
-#storwize_svc_multihostmap_enabled=true
-
-
#
# Options defined in cinder.volume.drivers.vmware.vmdk
#