--- /dev/null
+# Copyright 2014 IBM Corp.
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Tests for the IBM FlashSystem volume driver.
+"""
+
+import random
+import re
+
+import mock
+from oslo.concurrency import processutils
+from oslo.utils import excutils
+from oslo.utils import units
+import six
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder import utils
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.ibm import flashsystem
+from cinder.volume import utils as volume_utils
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+
+class FlashSystemManagementSimulator(object):
+ def __init__(self):
+ # Default protocol is FC
+ self._protocol = 'FC'
+ self._volumes_list = {}
+ self._hosts_list = {}
+ self._mappings_list = {}
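+        # Commands listed here return an injected error or condition once
+        # (set via error_injection()) and then reset to normal behavior.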
+ self._next_cmd_error = {
+ 'lsnode': '',
+ 'lssystem': '',
+ 'lsmdiskgrp': ''
+ }
+ self._errors = {
+            # CMMVC50000 is a fake error code indicating that a command did
+            # not produce the expected result. It stands in for the various
+            # CLI errors the real system can return.
+ 'CMMVC50000': ('', 'CMMVC50000 The command can not be executed '
+ 'successfully.')
+ }
+
+ @staticmethod
+ def _find_unused_id(d):
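+        # Return the smallest non-negative integer (as text) that is not
+        # yet used as an 'id' value by any object in d.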
+ ids = []
+ for v in d.itervalues():
+ ids.append(int(v['id']))
+ ids.sort()
+ for index, n in enumerate(ids):
+ if n > index:
+ return six.text_type(index)
+ return six.text_type(len(ids))
+
+ @staticmethod
+ def _is_invalid_name(name):
+ if re.match(r'^[a-zA-Z_][\w ._-]*$', name):
+ return False
+ return True
+
+ @staticmethod
+ def _cmd_to_dict(arg_list):
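+        # Parse a svcinfo/svctask argument list into a dict: flags listed
+        # in no_param_args map to True, flags in one_param_args map to
+        # their value, and the positional argument is stored under 'obj'.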
+ no_param_args = [
+ 'bytes',
+ 'force'
+ ]
+ one_param_args = [
+ 'delim',
+ 'hbawwpn',
+ 'host',
+ 'iogrp',
+ 'iscsiname',
+ 'mdiskgrp',
+ 'name',
+ 'scsi',
+ 'size',
+ 'unit'
+ ]
+
+ # All commands should begin with svcinfo or svctask
+ if arg_list[0] not in ('svcinfo', 'svctask') or len(arg_list) < 2:
+ raise exception.InvalidInput(reason=six.text_type(arg_list))
+ ret = {'cmd': arg_list[1]}
+ arg_list.pop(0)
+
+ skip = False
+ for i in range(1, len(arg_list)):
+ if skip:
+ skip = False
+ continue
+ if arg_list[i][0] == '-':
+ param = arg_list[i][1:]
+ if param in no_param_args:
+ ret[param] = True
+ elif param in one_param_args:
+ ret[param] = arg_list[i + 1]
+ skip = True
+ else:
+ raise exception.InvalidInput(
+ reason=('unrecognized argument %s') % arg_list[i])
+ else:
+ ret['obj'] = arg_list[i]
+ return ret
+
+ @staticmethod
+ def _print_cmd_info(rows, delim=' ', nohdr=False, **kwargs):
+ """Generic function for printing information."""
+ if nohdr:
+ del rows[0]
+ for index in range(len(rows)):
+ rows[index] = delim.join(rows[index])
+ return ('%s' % '\n'.join(rows), '')
+
+ @staticmethod
+ def _convert_units_bytes(num, unit):
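+        # Convert num expressed in the given unit (B, KB, MB, GB, TB or PB)
+        # to a byte count, returned as text.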
+ unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+ unit_index = 0
+
+ while unit.lower() != unit_array[unit_index].lower():
+ num = num * 1024
+ unit_index += 1
+
+ return six.text_type(num)
+
+ def _cmd_lshost(self, **kwargs):
+ """svcinfo lshost -delim !
+ svcinfo lshost -delim ! <host>
+ """
+ if 'obj' not in kwargs:
+ rows = []
+ rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
+ for host in self._hosts_list.itervalues():
+ rows.append([host['id'], host['host_name'], '1', '1',
+ 'degraded'])
+ if len(rows) > 1:
+ return self._print_cmd_info(rows=rows, **kwargs)
+ else:
+ return ('', '')
+ else:
+ host_name = kwargs['obj'].strip('\'\"')
+ if host_name not in self._hosts_list:
+ return self._errors['CMMVC50000']
+ host = self._hosts_list[host_name]
+ rows = []
+ rows.append(['id', host['id']])
+ rows.append(['name', host['host_name']])
+ rows.append(['port_count', '1'])
+ rows.append(['type', 'generic'])
+ rows.append(['mask', '1111'])
+ rows.append(['iogrp_count', '1'])
+ rows.append(['status', 'degraded'])
+ for port in host['iscsi_names']:
+ rows.append(['iscsi_name', port])
+ rows.append(['node_logged_in_count', '0'])
+ rows.append(['state', 'offline'])
+ for port in host['wwpns']:
+ rows.append(['WWPN', port])
+ rows.append(['node_logged_in_count', '0'])
+ rows.append(['state', 'active'])
+
+ if 'delim' in kwargs:
+ for index in range(len(rows)):
+ rows[index] = kwargs['delim'].join(rows[index])
+
+ return ('%s' % '\n'.join(rows), '')
+
+ def _cmd_lshostvdiskmap(self, **kwargs):
+ """svcinfo lshostvdiskmap -delim ! <host_name>"""
+
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC50000']
+
+ host_name = kwargs['obj'].strip('\'\"')
+ if host_name not in self._hosts_list:
+ return self._errors['CMMVC50000']
+
+ rows = []
+ rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
+ 'vdisk_UID'])
+
+ for mapping in self._mappings_list.itervalues():
+ if (host_name == '') or (mapping['host'] == host_name):
+ volume = self._volumes_list[mapping['vol']]
+ rows.append([mapping['id'], mapping['host'],
+ mapping['lun'], volume['id'],
+ volume['name'], volume['vdisk_UID']])
+
+ return self._print_cmd_info(rows=rows, **kwargs)
+
+ def _cmd_lsmdiskgrp(self, **kwargs):
+ """svcinfo lsmdiskgrp -gui -bytes -delim ! <pool>"""
+
+ status = 'online'
+ if self._next_cmd_error['lsmdiskgrp'] == 'error':
+ self._next_cmd_error['lsmdiskgrp'] = ''
+ return self._errors['CMMVC50000']
+
+ if self._next_cmd_error['lsmdiskgrp'] == 'status=offline':
+ self._next_cmd_error['lsmdiskgrp'] = ''
+ status = 'offline'
+
+ rows = [None] * 2
+ rows[0] = ['id', 'status', 'mdisk_count', 'vdisk_count', 'capacity',
+ 'free_capacity', 'virtual_capacity', 'used_capacity',
+ 'real_capacity', 'encrypted', 'type', 'encrypt']
+ rows[1] = ['0', status, '1', '0', '3573412790272',
+ '3529432325160', '1693247906775', '277841182',
+ '38203734097', 'no', 'parent', 'no']
+
+ if kwargs['obj'] == 'mdiskgrp0':
+ row = rows[1]
+ else:
+ return self._errors['CMMVC50000']
+
+ objrows = []
+ for idx, val in enumerate(rows[0]):
+ objrows.append([val, row[idx]])
+
+ if 'delim' in kwargs:
+ for index in range(len(objrows)):
+ objrows[index] = kwargs['delim'].join(objrows[index])
+
+ return ('%s' % '\n'.join(objrows), '')
+
+ def _cmd_lsnode(self, **kwargs):
+ """svcinfo lsnode -delim !
+ svcinfo lsnode -delim ! <node>
+ """
+
+ if self._protocol == 'FC' or self._protocol == 'both':
+ port_status = 'active'
+ else:
+ port_status = 'unconfigured'
+
+ rows1 = [None] * 7
+ rows1[0] = ['name', 'node1']
+ rows1[1] = ['port_id', '000000000000001']
+ rows1[2] = ['port_status', port_status]
+ rows1[3] = ['port_speed', '8Gb']
+ rows1[4] = ['port_id', '000000000000001']
+ rows1[5] = ['port_status', port_status]
+ rows1[6] = ['port_speed', '8Gb']
+
+ rows2 = [None] * 7
+ rows2[0] = ['name', 'node2']
+ rows2[1] = ['port_id', '000000000000002']
+ rows2[2] = ['port_status', port_status]
+ rows2[3] = ['port_speed', '8Gb']
+ rows2[4] = ['port_id', '000000000000002']
+ rows2[5] = ['port_status', port_status]
+ rows2[6] = ['port_speed', 'N/A']
+
+ rows3 = [None] * 3
+ rows3[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status',
+ 'IO_group_id', 'IO_group_name', 'config_node',
+ 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias',
+ 'panel_name', 'enclosure_id', 'canister_id',
+ 'enclosure_serial_number']
+ rows3[1] = ['1', 'node1', '', '0123456789ABCDEF', 'online', '0',
+ 'io_grp0', 'yes', '', 'TR1', 'naa.0123456789ABCDEF', '',
+ '01-1', '1', '1', 'H441028']
+ rows3[2] = ['2', 'node2', '', '0123456789ABCDEF', 'online', '0',
+ 'io_grp0', 'no', '', 'TR1', 'naa.0123456789ABCDEF', '',
+ '01-2', '1', '2', 'H441028']
+
+ if self._next_cmd_error['lsnode'] == 'error':
+ self._next_cmd_error['lsnode'] = ''
+ return self._errors['CMMVC50000']
+
+ rows = None
+ if 'obj' not in kwargs:
+ rows = rows3
+ elif kwargs['obj'] == '1':
+ rows = rows1
+ elif kwargs['obj'] == '2':
+ rows = rows2
+ else:
+ return self._errors['CMMVC50000']
+
+ if self._next_cmd_error['lsnode'] == 'header_mismatch':
+ rows[0].pop(2)
+ self._next_cmd_error['lsnode'] = ''
+
+ return self._print_cmd_info(rows=rows, delim=kwargs.get('delim', None))
+
+ def _cmd_lssystem(self, **kwargs):
+ """svcinfo lssystem -delim !"""
+
+ open_access_enabled = 'off'
+
+ if self._next_cmd_error['lssystem'] == 'error':
+ self._next_cmd_error['lssystem'] = ''
+ return self._errors['CMMVC50000']
+
+ if self._next_cmd_error['lssystem'] == 'open_access_enabled=on':
+ self._next_cmd_error['lssystem'] = ''
+ open_access_enabled = 'on'
+
+ rows = [None] * 3
+ rows[0] = ['id', '0123456789ABCDEF']
+ rows[1] = ['name', 'flashsystem_1.2.3.4']
+ rows[2] = ['open_access_enabled', open_access_enabled]
+
+ return self._print_cmd_info(rows=rows, **kwargs)
+
+ def _cmd_lsportfc(self, **kwargs):
+ """svcinfo lsportfc"""
+
+ if self._protocol == 'FC' or self._protocol == 'both':
+ status = 'active'
+ else:
+ status = 'unconfigured'
+
+ rows = [None] * 3
+ rows[0] = ['id', 'canister_id', 'adapter_id', 'port_id', 'type',
+ 'port_speed', 'node_id', 'node_name', 'WWPN',
+ 'nportid', 'status', 'attachment', 'topology']
+ rows[1] = ['0', '1', '1', '1', 'fc',
+ '8Gb', '1', 'node_1', 'AABBCCDDEEFF0011',
+ '000000', status, 'host', 'al']
+ rows[2] = ['1', '1', '1', '1', 'fc',
+ '8Gb', '1', 'node_1', 'AABBCCDDEEFF0010',
+ '000000', status, 'host', 'al']
+ return self._print_cmd_info(rows=rows, **kwargs)
+
+ def _cmd_lsportip(self, **kwargs):
+ """svcinfo lsportip"""
+
+ if self._protocol == 'iSCSI' or self._protocol == 'both':
+ IP_address1 = '192.168.1.10'
+ IP_address2 = '192.168.1.11'
+ state = 'online'
+ speed = '8G'
+ else:
+ IP_address1 = ''
+ IP_address2 = ''
+ state = ''
+ speed = ''
+
+ rows = [None] * 3
+ rows[0] = ['id', 'node_id', 'node_name', 'canister_id', 'adapter_id',
+ 'port_id', 'IP_address', 'mask', 'gateway', 'IP_address_6',
+ 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed',
+ 'failover', 'link_state', 'host', 'host_6', 'vlan',
+ 'vlan_6', 'adapter_location', 'adapter_port_id']
+ rows[1] = ['1', '1', 'node1', '0', '0',
+ '0', IP_address1, '', '', '',
+ '0', '', '11:22:33:44:55:AA', '', state, speed,
+ 'no', 'active', '', '', '', '', '0', '0']
+ rows[2] = ['2', '2', 'node2', '0', '0',
+ '0', IP_address2, '', '', '',
+ '0', '', '11:22:33:44:55:BB', '', state, speed,
+ 'no', 'active', '', '', '', '', '0', '0']
+
+ return self._print_cmd_info(rows=rows, **kwargs)
+
+ def _cmd_lsvdisk(self, **kwargs):
+ """cmd: svcinfo lsvdisk -gui -bytes -delim ! <vdisk_name>"""
+
+ if 'obj' not in kwargs or (
+ 'delim' not in kwargs) or (
+ 'bytes' not in kwargs):
+ return self._errors['CMMVC50000']
+
+ if kwargs['obj'] not in self._volumes_list:
+ return self._errors['CMMVC50000']
+
+ vol = self._volumes_list[kwargs['obj']]
+
+ rows = []
+ rows.append(['id', vol['id']])
+ rows.append(['name', vol['name']])
+ rows.append(['status', vol['status']])
+ rows.append(['capacity', vol['capacity']])
+ rows.append(['vdisk_UID', vol['vdisk_UID']])
+ rows.append(['udid', ''])
+ rows.append(['open_access_scsi_id', '1'])
+ rows.append(['parent_mdisk_grp_id', '0'])
+ rows.append(['parent_mdisk_grp_name', 'mdiskgrp0'])
+
+ for index in range(len(rows)):
+ rows[index] = kwargs['delim'].join(rows[index])
+ return ('%s' % '\n'.join(rows), '')
+
+ def _cmd_lsvdiskhostmap(self, **kwargs):
+ """svcinfo lsvdiskhostmap -delim ! <vdisk_name>"""
+
+ if 'obj' not in kwargs or (
+ 'delim' not in kwargs):
+ return self._errors['CMMVC50000']
+
+ vdisk_name = kwargs['obj']
+ if vdisk_name not in self._volumes_list:
+ return self._errors['CMMVC50000']
+
+ rows = []
+ rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name',
+ 'vdisk_UID', 'IO_group_id', 'IO_group_name'])
+
+ mappings_found = 0
+ for mapping in self._mappings_list.itervalues():
+ if (mapping['vol'] == vdisk_name):
+ mappings_found += 1
+ volume = self._volumes_list[mapping['vol']]
+ host = self._hosts_list[mapping['host']]
+ rows.append([volume['id'], volume['name'], '1', host['id'],
+ host['host_name'], volume['vdisk_UID'],
+ '0', 'mdiskgrp0'])
+
+ if mappings_found:
+ return self._print_cmd_info(rows=rows, **kwargs)
+ else:
+ return ('', '')
+
+ def _cmd_expandvdisksize(self, **kwargs):
+ """svctask expandvdisksize -size <size> -unit gb <vdisk_name>"""
+
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC50000']
+ vol_name = kwargs['obj'].strip('\'\"')
+
+ if 'size' not in kwargs:
+ return self._errors['CMMVC50000']
+ size = int(kwargs['size'])
+
+ if vol_name not in self._volumes_list:
+ return self._errors['CMMVC50000']
+
+ curr_size = int(self._volumes_list[vol_name]['capacity'])
+ addition = size * units.Gi
+ self._volumes_list[vol_name]['capacity'] = six.text_type(
+ curr_size + addition)
+ return ('', '')
+
+ def _cmd_mkvdisk(self, **kwargs):
+ """svctask mkvdisk -name <name> -mdiskgrp <mdiskgrp> -iogrp <iogrp>
+ -size <size> -unit <unit>
+ """
+
+ if 'name' not in kwargs or (
+ 'size' not in kwargs) or (
+ 'unit' not in kwargs):
+ return self._errors['CMMVC50000']
+
+ vdisk_info = {}
+ vdisk_info['id'] = self._find_unused_id(self._volumes_list)
+ vdisk_info['name'] = kwargs['name'].strip('\'\"')
+ vdisk_info['status'] = 'online'
+ vdisk_info['capacity'] = self._convert_units_bytes(
+ int(kwargs['size']), kwargs['unit'])
+ vdisk_info['vdisk_UID'] = ('60050760') + ('0' * 14) + vdisk_info['id']
+
+ if vdisk_info['name'] in self._volumes_list:
+ return self._errors['CMMVC50000']
+ else:
+ self._volumes_list[vdisk_info['name']] = vdisk_info
+ return ('Virtual Disk, id [%s], successfully created' %
+ (vdisk_info['id']), '')
+
+ def _cmd_rmvdisk(self, **kwargs):
+ """svctask rmvdisk -force <vdisk_name>"""
+
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC50000']
+
+ vdisk_name = kwargs['obj'].strip('\'\"')
+
+ if vdisk_name not in self._volumes_list:
+ return self._errors['CMMVC50000']
+
+ del self._volumes_list[vdisk_name]
+ return ('', '')
+
+ def _add_port_to_host(self, host_info, **kwargs):
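+        # Add an iSCSI or FC port to host_info; adding a port that is
+        # already registered on another host yields a CLI error.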
+ if 'iscsiname' in kwargs:
+ added_key = 'iscsi_names'
+ added_val = kwargs['iscsiname'].strip('\'\"')
+ elif 'hbawwpn' in kwargs:
+ added_key = 'wwpns'
+ added_val = kwargs['hbawwpn'].strip('\'\"')
+ else:
+ return self._errors['CMMVC50000']
+
+ host_info[added_key].append(added_val)
+
+ for v in self._hosts_list.itervalues():
+ if v['id'] == host_info['id']:
+ continue
+ for port in v[added_key]:
+ if port == added_val:
+ return self._errors['CMMVC50000']
+ return ('', '')
+
+ def _cmd_mkhost(self, **kwargs):
+ """svctask mkhost -force -hbawwpn <wwpn> -name <host_name>
+ svctask mkhost -force -iscsiname <initiator> -name <host_name>
+ """
+
+ if 'name' not in kwargs:
+ return self._errors['CMMVC50000']
+
+ host_name = kwargs['name'].strip('\'\"')
+ if self._is_invalid_name(host_name):
+ return self._errors['CMMVC50000']
+ if host_name in self._hosts_list:
+ return self._errors['CMMVC50000']
+
+ host_info = {}
+ host_info['id'] = self._find_unused_id(self._hosts_list)
+ host_info['host_name'] = host_name
+ host_info['iscsi_names'] = []
+ host_info['wwpns'] = []
+
+ out, err = self._add_port_to_host(host_info, **kwargs)
+ if not len(err):
+ self._hosts_list[host_name] = host_info
+ return ('Host, id [%s], successfully created' %
+ (host_info['id']), '')
+ else:
+ return (out, err)
+
+ def _cmd_addhostport(self, **kwargs):
+ """svctask addhostport -force -hbawwpn <wwpn> <host>
+ svctask addhostport -force -iscsiname <initiator> <host>
+ """
+
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC50000']
+ host_name = kwargs['obj'].strip('\'\"')
+
+ if host_name not in self._hosts_list:
+ return self._errors['CMMVC50000']
+
+ host_info = self._hosts_list[host_name]
+ return self._add_port_to_host(host_info, **kwargs)
+
+ def _cmd_rmhost(self, **kwargs):
+ """svctask rmhost <host>"""
+
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC50000']
+
+ host_name = kwargs['obj'].strip('\'\"')
+ if host_name not in self._hosts_list:
+ return self._errors['CMMVC50000']
+
+ for v in self._mappings_list.itervalues():
+ if (v['host'] == host_name):
+ return self._errors['CMMVC50000']
+
+ del self._hosts_list[host_name]
+ return ('', '')
+
+ def _cmd_mkvdiskhostmap(self, **kwargs):
+ """svctask mkvdiskhostmap -host <host> -scsi <lun> <vdisk_name>"""
+
+ mapping_info = {}
+ mapping_info['id'] = self._find_unused_id(self._mappings_list)
+
+ if 'host' not in kwargs or (
+ 'scsi' not in kwargs) or (
+ 'obj' not in kwargs):
+ return self._errors['CMMVC50000']
+ mapping_info['host'] = kwargs['host'].strip('\'\"')
+ mapping_info['lun'] = kwargs['scsi'].strip('\'\"')
+ mapping_info['vol'] = kwargs['obj'].strip('\'\"')
+
+ if mapping_info['vol'] not in self._volumes_list:
+ return self._errors['CMMVC50000']
+
+ if mapping_info['host'] not in self._hosts_list:
+ return self._errors['CMMVC50000']
+
+ if mapping_info['vol'] in self._mappings_list:
+ return self._errors['CMMVC50000']
+
+ for v in self._mappings_list.itervalues():
+ if ((v['host'] == mapping_info['host']) and
+ (v['lun'] == mapping_info['lun'])):
+ return self._errors['CMMVC50000']
+
+ for v in self._mappings_list.itervalues():
+ if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
+ return self._errors['CMMVC50000']
+
+ self._mappings_list[mapping_info['id']] = mapping_info
+ return ('Virtual Disk to Host map, id [%s], successfully created'
+ % (mapping_info['id']), '')
+
+ def _cmd_rmvdiskhostmap(self, **kwargs):
+ """svctask rmvdiskhostmap -host <host> <vdisk_name>"""
+
+ if 'host' not in kwargs or 'obj' not in kwargs:
+ return self._errors['CMMVC50000']
+ host = kwargs['host'].strip('\'\"')
+ vdisk = kwargs['obj'].strip('\'\"')
+
+ mapping_ids = []
+ for v in self._mappings_list.itervalues():
+ if v['vol'] == vdisk:
+ mapping_ids.append(v['id'])
+ if not mapping_ids:
+ return self._errors['CMMVC50000']
+
+ this_mapping = None
+ for mapping_id in mapping_ids:
+ if self._mappings_list[mapping_id]['host'] == host:
+ this_mapping = mapping_id
+ if this_mapping is None:
+ return self._errors['CMMVC50000']
+
+ del self._mappings_list[this_mapping]
+ return ('', '')
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def execute_command(self, cmd, check_exit_code=True):
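+        # Dispatch the CLI argument list to the matching _cmd_* handler and
+        # mimic the (stdout, stderr) contract of a real SSH invocation.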
+ try:
+ kwargs = self._cmd_to_dict(cmd)
+ except exception.InvalidInput:
+ return self._errors['CMMVC50000']
+
+ command = kwargs['cmd']
+ del kwargs['cmd']
+ func = getattr(self, '_cmd_' + command)
+ out, err = func(**kwargs)
+
+ if (check_exit_code) and (len(err) != 0):
+ raise processutils.ProcessExecutionError(exit_code=1,
+ stdout=out,
+ stderr=err,
+ cmd=command)
+ return (out, err)
+
+ def error_injection(self, cmd, error):
+ self._next_cmd_error[cmd] = error
+
+
+class FlashSystemFakeDriver(flashsystem.FlashSystemDriver):
+ def __init__(self, *args, **kwargs):
+ super(FlashSystemFakeDriver, self).__init__(*args, **kwargs)
+
+ def set_fake_storage(self, fake):
+ self.fake_storage = fake
+
+ def _ssh(self, cmd, check_exit_code=True):
+ try:
+ LOG.debug('Run CLI command: %s' % cmd)
+ utils.check_ssh_injection(cmd)
+ ret = self.fake_storage.execute_command(cmd, check_exit_code)
+ (stdout, stderr) = ret
+ LOG.debug('CLI output:\n stdout: %(stdout)s\n stderr: '
+ '%(stderr)s' % {'stdout': stdout, 'stderr': stderr})
+
+ except processutils.ProcessExecutionError as e:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('CLI Exception output:\n stdout: %(out)s\n '
+ 'stderr: %(err)s' % {'out': e.stdout,
+ 'err': e.stderr})
+ return ret
+
+
+class FlashSystemDriverTestCase(test.TestCase):
+
+ def _set_flag(self, flag, value):
+ group = self.driver.configuration.config_group
+ self.driver.configuration.set_override(flag, value, group)
+
+ def _reset_flags(self):
+ self.driver.configuration.local_conf.reset()
+ for k, v in self._def_flags.iteritems():
+ self._set_flag(k, v)
+
+ def _generate_vol_info(self,
+ vol_name,
+ vol_size=10,
+ vol_status='available'):
+ rand_id = six.text_type(random.randint(10000, 99999))
+ if not vol_name:
+ vol_name = 'test_volume%s' % rand_id
+
+ return {'name': vol_name,
+ 'size': vol_size,
+ 'id': '%s' % rand_id,
+ 'volume_type_id': None,
+ 'status': vol_status,
+ 'mdisk_grp_name': 'mdiskgrp0'}
+
+ def _generate_snap_info(self,
+ vol_name,
+ vol_id,
+ vol_size,
+ vol_status,
+ snap_status='available'):
+ rand_id = six.text_type(random.randint(10000, 99999))
+ return {'name': 'test_snap_%s' % rand_id,
+ 'id': rand_id,
+ 'volume': {'name': vol_name,
+ 'id': vol_id,
+ 'size': vol_size,
+ 'status': vol_status},
+ 'volume_size': vol_size,
+ 'status': snap_status,
+ 'mdisk_grp_name': 'mdiskgrp0'}
+
+ def setUp(self):
+ super(FlashSystemDriverTestCase, self).setUp()
+
+ self._def_flags = {'san_ip': 'hostname',
+ 'san_login': 'username',
+ 'san_password': 'password',
+ 'flashsystem_connection_protocol': 'FC',
+ 'flashsystem_multipath_enabled': False,
+ 'flashsystem_multihostmap_enabled': True}
+
+ self.connector = {
+ 'host': 'flashsystem',
+ 'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
+ 'wwpns': ['abcd000000000001', 'abcd000000000002'],
+ 'initiator': 'iqn.123456'}
+
+ self.sim = FlashSystemManagementSimulator()
+ self.driver = FlashSystemFakeDriver(
+ configuration=conf.Configuration(None))
+ self.driver.set_fake_storage(self.sim)
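+        # The fake driver routes all CLI calls to the in-memory simulator,
+        # so no SSH connection or real storage is needed.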
+
+ self._reset_flags()
+ self.ctxt = context.get_admin_context()
+ self.driver.do_setup(None)
+ self.driver.check_for_setup_error()
+
+ self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
+ self.sleeppatch.start()
+
+ def tearDown(self):
+ self.sleeppatch.stop()
+ super(FlashSystemDriverTestCase, self).tearDown()
+
+ def test_flashsystem_do_setup(self):
+ # case 1: cmd lssystem encounters error
+ self.sim.error_injection('lssystem', 'error')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+
+ # case 2: open_access_enabled is not off
+ self.sim.error_injection('lssystem', 'open_access_enabled=on')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+
+ # case 3: cmd lsmdiskgrp encounters error
+ self.sim.error_injection('lsmdiskgrp', 'error')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.do_setup, None)
+
+ # case 4: status is not online
+ self.sim.error_injection('lsmdiskgrp', 'status=offline')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.do_setup, None)
+
+ # case 5: cmd lsnode encounters error
+ self.sim.error_injection('lsnode', 'error')
+ self.assertRaises(processutils.ProcessExecutionError,
+ self.driver.do_setup, None)
+
+ # case 6: cmd lsnode header does not match
+ self.sim.error_injection('lsnode', 'header_mismatch')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+
+ # case 7: set as FC
+ self.sim.set_protocol('FC')
+ self.driver.do_setup(None)
+ self.assertEqual('FC', self.driver._protocol)
+
+ # case 8: no configured nodes available
+ self.sim.set_protocol('unknown')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.do_setup, None)
+
+ # clear environment
+ self.sim.set_protocol('FC')
+ self.driver.do_setup(None)
+
+ def test_flashsystem_check_for_setup_error(self):
+ self._set_flag('san_ip', '')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('san_ssh_port', '')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('san_login', '')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('san_password', None)
+ self._set_flag('san_private_key', None)
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ self._set_flag('flashsystem_connection_protocol', 'foo')
+ self.assertRaises(exception.InvalidInput,
+ self.driver.check_for_setup_error)
+ self._reset_flags()
+
+ # clear environment
+ self.driver.do_setup(None)
+
+ def test_flashsystem_validate_connector(self):
+ conn_neither = {'host': 'host'}
+ conn_iscsi = {'host': 'host', 'initiator': 'foo'}
+ conn_fc = {'host': 'host', 'wwpns': 'bar'}
+ conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
+
+ protocol = self.driver._protocol
+
+ # case 1: when protocol is FC
+ self.driver._protocol = 'FC'
+ self.driver.validate_connector(conn_fc)
+ self.driver.validate_connector(conn_both)
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.validate_connector, conn_iscsi)
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.validate_connector, conn_neither)
+
+ # clear environment
+ self.driver._protocol = protocol
+
+ def test_flashsystem_volumes(self):
+ # case 1: create volume
+ vol = self._generate_vol_info(None)
+ self.driver.create_volume(vol)
+
+ # Check whether volume is created successfully
+ attributes = self.driver._get_vdisk_attributes(vol['name'])
+ attr_size = float(attributes['capacity']) / units.Gi
+ self.assertEqual(float(vol['size']), attr_size)
+
+ # case 2: delete volume
+ self.driver.delete_volume(vol)
+
+        # case 3: delete a volume that does not exist (expected not to fail)
+ vol_no_exist = self._generate_vol_info(None)
+ self.driver.delete_volume(vol_no_exist)
+
+ def test_flashsystem_extend_volume(self):
+ vol = self._generate_vol_info(None)
+ self.driver.create_volume(vol)
+ self.driver.extend_volume(vol, '200')
+ attrs = self.driver._get_vdisk_attributes(vol['name'])
+ vol_size = int(attrs['capacity']) / units.Gi
+ self.assertAlmostEqual(vol_size, 200)
+
+ # clear environment
+ self.driver.delete_volume(vol)
+
+ def test_flashsystem_connection(self):
+ # case 1: initialize_connection/terminate_connection for good path
+ vol1 = self._generate_vol_info(None)
+ self.driver.create_volume(vol1)
+ self.driver.initialize_connection(vol1, self.connector)
+ self.driver.terminate_connection(vol1, self.connector)
+
+        # case 2: when the volume does not exist
+ vol2 = self._generate_vol_info(None)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.initialize_connection,
+ vol2, self.connector)
+
+ # case 3: _get_vdisk_map_properties raises exception
+ with mock.patch.object(flashsystem.FlashSystemDriver,
+ '_get_vdisk_map_properties') as get_properties:
+ get_properties.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.initialize_connection,
+ vol1, self.connector)
+
+ # clear environment
+ self.driver.delete_volume(vol1)
+
+ @mock.patch.object(flashsystem.FlashSystemDriver,
+ '_create_and_copy_vdisk_data')
+ def test_flashsystem_create_snapshot(self, _create_and_copy_vdisk_data):
+ # case 1: good path
+ vol1 = self._generate_vol_info(None)
+ snap1 = self._generate_snap_info(vol1['name'],
+ vol1['id'],
+ vol1['size'],
+ vol1['status'])
+ self.driver.create_snapshot(snap1)
+
+ # case 2: when volume status is error
+ vol2 = self._generate_vol_info(None, vol_status='error')
+ snap2 = self._generate_snap_info(vol2['name'],
+ vol2['id'],
+ vol2['size'],
+ vol2['status'])
+ self.assertRaises(exception.InvalidVolume,
+ self.driver.create_snapshot, snap2)
+
+ @mock.patch.object(flashsystem.FlashSystemDriver,
+ '_delete_vdisk')
+ def test_flashsystem_delete_snapshot(self, _delete_vdisk):
+ vol1 = self._generate_vol_info(None)
+ snap1 = self._generate_snap_info(vol1['name'],
+ vol1['id'],
+ vol1['size'],
+ vol1['status'])
+ self.driver.delete_snapshot(snap1)
+
+ @mock.patch.object(flashsystem.FlashSystemDriver,
+ '_create_and_copy_vdisk_data')
+ def test_flashsystem_create_volume_from_snapshot(
+ self, _create_and_copy_vdisk_data):
+ # case 1: good path
+ vol = self._generate_vol_info(None)
+ snap = self._generate_snap_info(vol['name'],
+ vol['id'],
+ vol['size'],
+ vol['status'])
+ self.driver.create_volume_from_snapshot(vol, snap)
+
+ # case 2: when size does not match
+ vol = self._generate_vol_info(None, vol_size=100)
+ snap = self._generate_snap_info(vol['name'],
+ vol['id'],
+ 200,
+ vol['status'])
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.create_volume_from_snapshot,
+ vol, snap)
+
+ # case 3: when snapshot status is not available
+ vol = self._generate_vol_info(None)
+ snap = self._generate_snap_info(vol['name'],
+ vol['id'],
+ vol['size'],
+ vol['status'],
+ snap_status='error')
+ self.assertRaises(exception.InvalidSnapshot,
+ self.driver.create_volume_from_snapshot,
+ vol, snap)
+
+ @mock.patch.object(flashsystem.FlashSystemDriver,
+ '_create_and_copy_vdisk_data')
+ def test_flashsystem_create_cloned_volume(
+ self, _create_and_copy_vdisk_data):
+ # case 1: good path
+ vol1 = self._generate_vol_info(None)
+ vol2 = self._generate_vol_info(None)
+ self.driver.create_cloned_volume(vol2, vol1)
+
+ # case 2: when size does not match
+ vol1 = self._generate_vol_info(None, vol_size=10)
+ vol2 = self._generate_vol_info(None, vol_size=20)
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.create_cloned_volume,
+ vol2, vol1)
+
+ def test_flashsystem_get_volume_stats(self):
+ # case 1: good path
+ self._set_flag('reserved_percentage', 25)
+ pool = 'mdiskgrp0'
+ backend_name = 'flashsystem_1.2.3.4' + '_' + pool
+
+ stats = self.driver.get_volume_stats()
+
+ self.assertEqual(25, stats['reserved_percentage'])
+ self.assertEqual('IBM', stats['vendor_name'])
+ self.assertEqual('FC', stats['storage_protocol'])
+ self.assertEqual(backend_name, stats['volume_backend_name'])
+
+ self._reset_flags()
+
+ # case 2: when lsmdiskgrp returns error
+ self.sim.error_injection('lsmdiskgrp', 'error')
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.get_volume_stats, refresh=True)
+
+ @mock.patch.object(flashsystem.FlashSystemDriver,
+ '_copy_vdisk_data')
+ def test_flashsystem_create_and_copy_vdisk_data(self, _copy_vdisk_data):
+ # case 1: when volume does not exist
+ vol1 = self._generate_vol_info(None)
+ vol2 = self._generate_vol_info(None)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver._create_and_copy_vdisk_data,
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+
+ # case 2: good path
+ self.driver.create_volume(vol1)
+ self.driver._create_and_copy_vdisk_data(
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+ self.driver.delete_volume(vol1)
+ self.driver.delete_volume(vol2)
+
+ # case 3: _copy_vdisk_data raises exception
+ self.driver.create_volume(vol1)
+ _copy_vdisk_data.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver._create_and_copy_vdisk_data,
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+ self.assertEqual(set(), self.driver._vdisk_copy_in_progress)
+
+ # clear environment
+ self.driver.delete_volume(vol1)
+ self.driver.delete_volume(vol2)
+
+ @mock.patch.object(volume_utils, 'copy_volume')
+ @mock.patch.object(flashsystem.FlashSystemDriver, '_scan_device')
+ @mock.patch.object(flashsystem.FlashSystemDriver, '_remove_device')
+ @mock.patch.object(utils, 'brick_get_connector_properties')
+ def test_flashsystem_copy_vdisk_data(self,
+ _connector,
+ _remove_device,
+ _scan_device,
+ copy_volume):
+
+ connector = _connector.return_value = self.connector
+ vol1 = self._generate_vol_info(None)
+ vol2 = self._generate_vol_info(None)
+ self.driver.create_volume(vol1)
+ self.driver.create_volume(vol2)
+
+ # case 1: no mapped before copy
+ self.driver._copy_vdisk_data(
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+ (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
+ (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
+ self.assertEqual(False, v1_mapped)
+ self.assertEqual(False, v2_mapped)
+
+ # case 2: mapped before copy
+ self.driver.initialize_connection(vol1, connector)
+ self.driver.initialize_connection(vol2, connector)
+ self.driver._copy_vdisk_data(
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+ (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
+ (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
+ self.assertEqual(True, v1_mapped)
+ self.assertEqual(True, v2_mapped)
+ self.driver.terminate_connection(vol1, connector)
+ self.driver.terminate_connection(vol2, connector)
+
+ # case 3: no mapped before copy, raise exception when scan
+ _scan_device.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver._copy_vdisk_data,
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+ (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
+ (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
+ self.assertEqual(False, v1_mapped)
+ self.assertEqual(False, v2_mapped)
+
+ # case 4: no mapped before copy, raise exception when copy
+ copy_volume.side_effect = exception.VolumeBackendAPIException
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver._copy_vdisk_data,
+ vol1['name'], vol1['id'], vol2['name'], vol2['id'])
+ (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
+ (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
+ self.assertEqual(False, v1_mapped)
+ self.assertEqual(False, v2_mapped)
+
+ # clear environment
+ self.driver.delete_volume(vol1)
+ self.driver.delete_volume(vol2)
+
+ def test_flashsystem_connector_to_hostname_prefix(self):
+ # Invalid characters will be translated to '-'
+
+ # case 1: host name is unicode with invalid characters
+ conn = {'host': u'unicode.test}.abc{.abc'}
+ self.assertEqual(u'unicode.test-.abc-.abc',
+ self.driver._connector_to_hostname_prefix(conn))
+
+ # case 2: host name is string with invalid characters
+ conn = {'host': 'string.test}.abc{.abc'}
+ self.assertEqual('string.test-.abc-.abc',
+ self.driver._connector_to_hostname_prefix(conn))
+
+ # case 3: host name is neither unicode nor string
+ conn = {'host': 12345}
+ self.assertRaises(exception.NoValidHost,
+ self.driver._connector_to_hostname_prefix,
+ conn)
+
+        # case 4: a host name starting with a number gets an underscore
+        # prefix
+ conn = {'host': '192.168.1.1'}
+ self.assertEqual('_192.168.1.1',
+ self.driver._connector_to_hostname_prefix(conn))
+
+ def test_flashsystem_create_host(self):
+ # case 1: create host
+ conn = {
+ 'host': 'flashsystem',
+ 'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
+ 'wwpns': ['abcd000000000001', 'abcd000000000002'],
+ 'initiator': 'iqn.123456'}
+ host = self.driver._create_host(conn)
+
+ # case 2: create host that already exists
+ self.assertRaises(processutils.ProcessExecutionError,
+ self.driver._create_host,
+ conn)
+
+ # case 3: delete host
+ self.driver._delete_host(host)
+
+ # case 4: create host with empty ports
+ conn = {'host': 'flashsystem', 'wwpns': []}
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver._create_host,
+ conn)
+
+ def test_flashsystem_find_host_exhaustive(self):
+ # case 1: create host and find it
+ conn1 = {
+ 'host': 'flashsystem-01',
+ 'wwnns': ['1111111111abcdef', '1111111111abcdeg'],
+ 'wwpns': ['1111111111000001', '1111111111000002'],
+ 'initiator': 'iqn.111111'}
+ conn2 = {
+ 'host': 'flashsystem-02',
+ 'wwnns': ['2222222222abcdef', '2222222222abcdeg'],
+ 'wwpns': ['2222222222000001', '2222222222000002'],
+ 'initiator': 'iqn.222222'}
+ conn3 = {
+ 'host': 'flashsystem-03',
+ 'wwnns': ['3333333333abcdef', '3333333333abcdeg'],
+ 'wwpns': ['3333333333000001', '3333333333000002'],
+ 'initiator': 'iqn.333333'}
+ host1 = self.driver._create_host(conn1)
+ host2 = self.driver._create_host(conn2)
+ self.assertEqual(
+ host2,
+ self.driver._find_host_exhaustive(conn2, [host1, host2]))
+ self.assertEqual(
+ None,
+ self.driver._find_host_exhaustive(conn3, [host1, host2]))
+
+ # clear environment
+ self.driver._delete_host(host1)
+ self.driver._delete_host(host2)
+
+ def test_flashsystem_get_vdisk_params(self):
+ # case 1: use default params
+ self.driver._get_vdisk_params(None)
+
+ # case 2: use extra params from type
+ opts1 = {'storage_protocol': 'FC'}
+ opts2 = {'capabilities:storage_protocol': 'FC'}
+ opts3 = {'storage_protocol': 'iSCSI'}
+ type1 = volume_types.create(self.ctxt, 'opts1', opts1)
+ type2 = volume_types.create(self.ctxt, 'opts2', opts2)
+ type3 = volume_types.create(self.ctxt, 'opts3', opts3)
+ self.assertEqual(
+ 'FC',
+ self.driver._get_vdisk_params(type1['id'])['protocol'])
+ self.assertEqual(
+ 'FC',
+ self.driver._get_vdisk_params(type2['id'])['protocol'])
+ self.assertRaises(exception.InvalidInput,
+ self.driver._get_vdisk_params,
+ type3['id'])
+
+ # clear environment
+ volume_types.destroy(self.ctxt, type1['id'])
+ volume_types.destroy(self.ctxt, type2['id'])
+
+ def test_flashsystem_map_vdisk_to_host(self):
+ # case 1: no host found
+ vol1 = self._generate_vol_info(None)
+ self.driver.create_volume(vol1)
+ self.assertEqual(
+            # lun id should begin with 1
+ 1,
+ self.driver._map_vdisk_to_host(vol1['name'], self.connector))
+
+ # case 2: host already exists
+ vol2 = self._generate_vol_info(None)
+ self.driver.create_volume(vol2)
+ self.assertEqual(
+            # lun id should be sequential
+ 2,
+ self.driver._map_vdisk_to_host(vol2['name'], self.connector))
+
+ # case 3: test if already mapped
+ self.assertEqual(
+ 1,
+ self.driver._map_vdisk_to_host(vol1['name'], self.connector))
+
+ # clean environment
+ self.driver._unmap_vdisk_from_host(vol1['name'], self.connector)
+ self.driver._unmap_vdisk_from_host(vol2['name'], self.connector)
+ self.driver.delete_volume(vol1)
+ self.driver.delete_volume(vol2)
+
+        # case 4: once no vdisk is mapped to the host, the host should
+        # have been removed
+ self.assertEqual(
+ None,
+ self.driver._get_host_from_connector(self.connector))
--- /dev/null
+# Copyright 2014 IBM Corp.
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Volume driver for IBM FlashSystem storage systems.
+
+Limitations:
+1. The Cinder driver only works when open_access_enabled=off.
+2. The Cinder driver only works when the connection protocol is FC.
+
+"""
+
+import random
+import re
+import string
+import threading
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import excutils
+from oslo.utils import units
+import six
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.openstack.common import strutils
+from cinder import utils
+from cinder.volume.drivers.san import san
+from cinder.volume import utils as volume_utils
+from cinder.volume import volume_types
+from cinder.zonemanager import utils as fczm_utils
+
+LOG = logging.getLogger(__name__)
+
+FLASHSYSTEM_VOLPOOL_NAME = 'mdiskgrp0'
+FLASHSYSTEM_VOL_IOGRP = 0
+
+flashsystem_opts = [
+ cfg.StrOpt('flashsystem_connection_protocol',
+ default='FC',
+ help='Connection protocol should be FC.'),
+ cfg.BoolOpt('flashsystem_multipath_enabled',
+ default=False,
+ help='Connect with multipath (FC only).'),
+ cfg.BoolOpt('flashsystem_multihostmap_enabled',
+ default=True,
+                help='Allows a vdisk to be mapped to multiple hosts.')
+]
+
+CONF = cfg.CONF
+CONF.register_opts(flashsystem_opts)
+
+
+class FlashSystemDriver(san.SanDriver):
+ """IBM FlashSystem 840 FC volume driver.
+
+ Version history:
+ 1.0.0 - Initial driver
+
+ """
+
+ VERSION = "1.0.0"
+
+ def __init__(self, *args, **kwargs):
+ super(FlashSystemDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(flashsystem_opts)
+ self._storage_nodes = {}
+ self._protocol = None
+ self._context = None
+ self._system_name = None
+ self._system_id = None
+
+ def _ssh(self, ssh_cmd, check_exit_code=True):
+ try:
+ return self._run_ssh(ssh_cmd, check_exit_code)
+ except processutils.ProcessExecutionError as e:
+ msg = (_('CLI Exception output:\n command: %(cmd)s\n '
+ 'stdout: %(out)s\n stderr: %(err)s')
+ % {'cmd': ssh_cmd, 'out': e.stdout,
+ 'err': e.stderr})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ def _append_dict(self, dict_, key, value):
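+        # Collect repeated keys into a list so that multi-valued CLI
+        # attributes (such as port_id) are preserved.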
+ key, value = key.strip(), value.strip()
+ obj = dict_.get(key, None)
+ if obj is None:
+ dict_[key] = value
+ elif isinstance(obj, list):
+ obj.append(value)
+ dict_[key] = obj
+ else:
+ dict_[key] = [obj, value]
+ return dict_
+
+ def _assert_ssh_return(self, test, fun, ssh_cmd, out, err):
+ self._driver_assert(test,
+ (_('%(fun)s: Failed with unexpected CLI output.\n '
+ 'Command: %(cmd)s\n stdout: %(out)s\n '
+ 'stderr: %(err)s')
+ % {'fun': fun, 'cmd': ssh_cmd,
+ 'out': six.text_type(out),
+ 'err': six.text_type(err)}))
+
+ def _build_default_params(self):
+ return {'protocol': self.configuration.flashsystem_connection_protocol,
+ 'multipath': self.configuration.flashsystem_multipath_enabled}
+
+ def _build_initiator_target_map(self, initiator_wwpns, target_wwpns):
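+        # Map every initiator WWPN to the full list of target WWPNs; the
+        # result is returned in the connection info for FC zoning.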
+ map = {}
+ for i_wwpn in initiator_wwpns:
+ idx = six.text_type(i_wwpn)
+ map[idx] = []
+ for t_wwpn in target_wwpns:
+ map[idx].append(t_wwpn)
+ return map
+
+ def _check_vdisk_params(self, params):
+ # Check that the requested protocol is enabled
+ if params['protocol'] != self._protocol:
+ msg = (_("Illegal value '%(prot)s' specified for "
+ "flashsystem_connection_protocol: "
+ "valid value(s) are %(enabled)s.")
+ % {'prot': params['protocol'],
+ 'enabled': self._protocol})
+ raise exception.InvalidInput(reason=msg)
+
+ def _connector_to_hostname_prefix(self, connector):
+ """Translate connector info to storage system host name.
+
+        Build the prefix (at most 55 characters) of the host name used on
+        the storage subsystem from the connector's host name, replacing any
+        invalid characters. _create_host later appends a random 8-character
+        suffix to avoid collisions, so the total length should be at most
+        63 characters.
+
+ """
+
+ # Build cleanup translation tables for host names
+ invalid_ch_in_host = ''
+ for num in range(0, 128):
+ ch = six.text_type(chr(num))
+ if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
+ invalid_ch_in_host = invalid_ch_in_host + ch
+
+ host_name = connector['host']
+ if isinstance(host_name, unicode):
+ unicode_host_name_filter = dict((ord(unicode(char)), u'-')
+ for char in invalid_ch_in_host)
+ host_name = host_name.translate(unicode_host_name_filter)
+ elif isinstance(host_name, str):
+ string_host_name_filter = string.maketrans(
+ invalid_ch_in_host, '-' * len(invalid_ch_in_host))
+ host_name = host_name.translate(string_host_name_filter)
+ else:
+ msg = (_('_create_host: Can not clean host name. Host name '
+ 'is not unicode or string.'))
+ LOG.error(msg)
+ raise exception.NoValidHost(reason=msg)
+
+ host_name = six.text_type(host_name)
+
+        # The FlashSystem family does not accept host names that start
+        # with a number, so prefix such names with an underscore.
+ if not re.match('^[A-Za-z]', host_name):
+ host_name = '_' + host_name
+
+ return host_name[:55]
+
+ def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id,
+ dest_vdisk_name, dest_vdisk_id):
+ """Copy data from src vdisk to dest vdisk.
+
+        To copy data between vdisks, both vdisks must be mapped to a host.
+        If a vdisk is not already mapped, it is mapped first. When the copy
+        completes, each vdisk is restored to its previous mapped or
+        unmapped state.
+ """
+
+ LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.'
+ % {'src': src_vdisk_name, 'dest': dest_vdisk_name})
+
+ connector = utils.brick_get_connector_properties()
+ (src_map, src_lun_id) = self._is_vdisk_map(
+ src_vdisk_name, connector)
+ (dest_map, dest_lun_id) = self._is_vdisk_map(
+ dest_vdisk_name, connector)
+
+ src_map_device = None
+ src_properties = None
+ dest_map_device = None
+ dest_properties = None
+
+ try:
+ if not src_map:
+ src_lun_id = self._map_vdisk_to_host(src_vdisk_name,
+ connector)
+ if not dest_map:
+ dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name,
+ connector)
+ src_properties = self._get_vdisk_map_properties(
+ connector, src_lun_id, src_vdisk_name,
+ src_vdisk_id, self._get_vdisk_params(None))
+ src_map_device = self._scan_device(src_properties)
+
+ dest_properties = self._get_vdisk_map_properties(
+ connector, dest_lun_id, dest_vdisk_name,
+ dest_vdisk_id, self._get_vdisk_params(None))
+ dest_map_device = self._scan_device(dest_properties)
+
+ src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name)
+
+            # vdisk capacity is in bytes; convert it to MB for copy_volume()
+ size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi
+ volume_utils.copy_volume(
+ src_map_device['path'],
+ dest_map_device['path'],
+ size_in_mb,
+ self.configuration.volume_dd_blocksize)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('_copy_vdisk_data: Failed to '
+ 'copy %(src)s to %(dest)s.')
+ % {'src': src_vdisk_name, 'dest': dest_vdisk_name})
+ finally:
+ if not dest_map:
+ self._unmap_vdisk_from_host(dest_vdisk_name, connector)
+ self._remove_device(dest_properties, dest_map_device)
+ if not src_map:
+ self._unmap_vdisk_from_host(src_vdisk_name, connector)
+ self._remove_device(src_properties, src_map_device)
+
+ LOG.debug(
+ 'leave: _copy_vdisk_data: %(src)s -> %(dest)s.'
+ % {'src': src_vdisk_name, 'dest': dest_vdisk_name})
+
+ def _create_and_copy_vdisk_data(self, src_vdisk_name, src_vdisk_id,
+ dest_vdisk_name, dest_vdisk_id):
+ vdisk_attr = self._get_vdisk_attributes(src_vdisk_name)
+ self._driver_assert(
+ vdisk_attr is not None,
+ (_('_create_and_copy_vdisk_data: Failed to get attributes for '
+ 'vdisk %s.') % src_vdisk_name))
+
+ self._create_vdisk(dest_vdisk_name, vdisk_attr['capacity'], 'b', None)
+
+        # Create a timer to lock the vdisks used for the data copy; the
+        # looping call returns once both are marked copy-in-progress.
+ timer = loopingcall.FixedIntervalLoopingCall(
+ self._set_vdisk_copy_in_progress,
+ [src_vdisk_name, dest_vdisk_name])
+ timer.start(interval=self._check_lock_interval).wait()
+ timer.stop()
+
+ try:
+ self._copy_vdisk_data(src_vdisk_name, src_vdisk_id,
+ dest_vdisk_name, dest_vdisk_id)
+ finally:
+ self._unset_vdisk_copy_in_progress(
+ [src_vdisk_name, dest_vdisk_name])
+
+ def _create_host(self, connector):
+ """Create a new host on the storage system.
+
+ We create a host and associate it with the given connection
+ information.
+
+ """
+
+ LOG.debug('enter: _create_host: host %s.' % connector['host'])
+
+ rand_id = six.text_type(random.randint(0, 99999999)).zfill(8)
+ host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector),
+ rand_id)
+
+ ports = []
+ if 'FC' == self._protocol and 'wwpns' in connector:
+ for wwpn in connector['wwpns']:
+ ports.append('-hbawwpn %s' % wwpn)
+
+ self._driver_assert(len(ports),
+ (_('_create_host: No connector ports.')))
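+        # mkhost registers only a single port; the remaining ports are
+        # added afterwards with addhostport.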
+ port1 = ports.pop(0)
+ arg_name, arg_val = port1.split()
+ ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name',
+ '"%s"' % host_name]
+ out, err = self._ssh(ssh_cmd)
+ self._assert_ssh_return('successfully created' in out,
+ '_create_host', ssh_cmd, out, err)
+
+ for port in ports:
+ arg_name, arg_val = port.split()
+ ssh_cmd = ['svctask', 'addhostport', '-force',
+ arg_name, arg_val, host_name]
+ out, err = self._ssh(ssh_cmd)
+ self._assert_ssh_return(
+ (len(out.strip()) == 0),
+ '_create_host', ssh_cmd, out, err)
+
+ LOG.debug(
+ 'leave: _create_host: host %(host)s - %(host_name)s.' %
+ {'host': connector['host'], 'host_name': host_name})
+
+ return host_name
+
+ def _create_vdisk(self, name, size, unit, opts):
+ """Create a new vdisk."""
+
+ LOG.debug('enter: _create_vdisk: vdisk %s.' % name)
+
+ ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp',
+ FLASHSYSTEM_VOLPOOL_NAME, '-iogrp',
+ six.text_type(FLASHSYSTEM_VOL_IOGRP),
+ '-size', size, '-unit', unit]
+ out, err = self._ssh(ssh_cmd)
+ self._assert_ssh_return(len(out.strip()), '_create_vdisk',
+ ssh_cmd, out, err)
+
+ # Ensure that the output is as expected
+ match_obj = re.search(
+ 'Virtual Disk, id \[([0-9]+)\], successfully created', out)
+
+ self._driver_assert(
+ match_obj is not None,
+ (_('_create_vdisk %(name)s - did not find '
+ 'success message in CLI output.\n '
+ 'stdout: %(out)s\n stderr: %(err)s')
+ % {'name': name, 'out': six.text_type(out),
+ 'err': six.text_type(err)}))
+
+ LOG.debug('leave: _create_vdisk: vdisk %s.' % name)
+
+ def _delete_host(self, host_name):
+ """Delete a host on the storage system."""
+
+ LOG.debug('enter: _delete_host: host %s.' % host_name)
+
+ ssh_cmd = ['svctask', 'rmhost', host_name]
+ out, err = self._ssh(ssh_cmd)
+ # No output should be returned from rmhost
+ self._assert_ssh_return(
+ len(out.strip()) == 0,
+ '_delete_host', ssh_cmd, out, err)
+
+ LOG.debug('leave: _delete_host: host %s.' % host_name)
+
+ def _delete_vdisk(self, name, force):
+ """Deletes existing vdisks."""
+
+ LOG.debug('enter: _delete_vdisk: vdisk %s.' % name)
+
+ # Try to delete volume only if found on the storage
+ vdisk_defined = self._is_vdisk_defined(name)
+ if not vdisk_defined:
+            LOG.warning(_LW('Tried to delete vdisk %s but it does not '
+                            'exist.') % name)
+ return
+
+ ssh_cmd = ['svctask', 'rmvdisk', '-force', name]
+ if not force:
+ ssh_cmd.remove('-force')
+ out, err = self._ssh(ssh_cmd)
+ # No output should be returned from rmvdisk
+ self._assert_ssh_return(
+ len(out.strip()) == 0,
+ ('_delete_vdisk %(name)s') % {'name': name},
+ ssh_cmd, out, err)
+
+ LOG.debug('leave: _delete_vdisk: vdisk %s.' % name)
+
+ def _driver_assert(self, assert_condition, exception_message):
+ """Internal assertion mechanism for CLI output."""
+ if not assert_condition:
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(data=exception_message)
+
+ def _execute_command_and_parse_attributes(self, ssh_cmd):
+ """Execute command on the FlashSystem and parse attributes.
+
+ Exception is raised if the information from the system
+ can not be obtained.
+
+ """
+
+ LOG.debug(
+ 'enter: _execute_command_and_parse_attributes: '
+ 'command: %s.' % six.text_type(ssh_cmd))
+
+ try:
+ out, err = self._ssh(ssh_cmd)
+ except processutils.ProcessExecutionError:
+ LOG.warning(_LW('_execute_command_and_parse_attributes: Failed to '
+ 'run command: %s.') % six.text_type(ssh_cmd))
+            # Do not raise an exception when the command fails; return None
+            # and let the caller decide what to do.
+ return None
+
+ self._assert_ssh_return(
+ len(out),
+ '_execute_command_and_parse_attributes', ssh_cmd, out, err)
+
+ attributes = {}
+ for attrib_line in out.split('\n'):
+            # If '!' is not found, partition() returns the whole string and
+            # two empty strings.
+ attrib_name, foo, attrib_value = attrib_line.partition('!')
+ if attrib_name is not None and len(attrib_name.strip()):
+ self._append_dict(attributes, attrib_name, attrib_value)
+
+ LOG.debug(
+ 'leave: _execute_command_and_parse_attributes: '
+ 'command: %(cmd)s attributes: %(attr)s.'
+ % {'cmd': six.text_type(ssh_cmd),
+ 'attr': six.text_type(attributes)})
+
+ return attributes
+
+ def _find_host_exhaustive(self, connector, hosts):
+ for host in hosts:
+ ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host]
+ out, err = self._ssh(ssh_cmd)
+ self._assert_ssh_return(
+ len(out.strip()),
+ '_find_host_exhaustive', ssh_cmd, out, err)
+ for attr_line in out.split('\n'):
+                # If '!' is not found, partition() returns the whole string
+                # and two empty strings.
+ attr_name, foo, attr_val = attr_line.partition('!')
+ if (attr_name == 'WWPN' and
+ 'wwpns' in connector and attr_val.lower() in
+ map(str.lower, map(str, connector['wwpns']))):
+ return host
+ return None
+
+ def _get_hdr_dic(self, header, row, delim):
+ """Return CLI row data as a dictionary indexed by names from header.
+ string. The strings are converted to columns using the delimiter in
+ delim.
+ """
+
+ attributes = header.split(delim)
+ values = row.split(delim)
+ self._driver_assert(
+ len(values) == len(attributes),
+ (_('_get_hdr_dic: attribute headers and values do not match.\n '
+ 'Headers: %(header)s\n Values: %(row)s.')
+ % {'header': six.text_type(header), 'row': six.text_type(row)}))
+        dic = dict(zip(attributes, values))
+ return dic
+
+ def _get_conn_fc_wwpns(self):
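+        # Collect the WWPNs of all FC ports that lsportfc reports as active.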
+ wwpns = []
+
+ cmd = ['svcinfo', 'lsportfc']
+
+ generator = self._port_conf_generator(cmd)
+ header = next(generator, None)
+ if not header:
+ return wwpns
+
+ for port_data in generator:
+ try:
+ if port_data['status'] == 'active':
+ wwpns.append(port_data['WWPN'])
+ except KeyError:
+ self._handle_keyerror('lsportfc', header)
+
+ return wwpns
+
+ def _get_fc_wwpns(self):
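+        # Merge the WWPNs reported by lsnode into each storage node's WWPN
+        # list, skipping ports whose status is 'unconfigured'.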
+ for key in self._storage_nodes:
+ node = self._storage_nodes[key]
+ ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!', node['id']]
+ attributes = self._execute_command_and_parse_attributes(ssh_cmd)
+ wwpns = set(node['WWPN'])
+ for i, s in zip(attributes['port_id'], attributes['port_status']):
+ if 'unconfigured' != s:
+ wwpns.add(i)
+ node['WWPN'] = list(wwpns)
+ LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.')
+ % {'node': node['id'], 'wwpn': node['WWPN']})
+
+ def _get_host_from_connector(self, connector):
+ """List the hosts defined in the storage.
+
+ Return the host name with the given connection info, or None if there
+ is no host fitting that information.
+
+ """
+
+ LOG.debug('enter: _get_host_from_connector: %s.' % connector)
+
+        # Get the list of hosts defined on the storage system
+ ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
+ out, err = self._ssh(ssh_cmd)
+
+ if not len(out.strip()):
+ return None
+
+        # Search the host list exhaustively for a host whose ports match
+        # the connector.
+ hostname = None
+
+ host_lines = out.strip().split('\n')
+ self._assert_ssh_return(
+ len(host_lines),
+ '_get_host_from_connector', ssh_cmd, out, err)
+ header = host_lines.pop(0).split('!')
+ self._assert_ssh_return(
+ 'name' in header,
+ '_get_host_from_connector', ssh_cmd, out, err)
+ name_index = header.index('name')
+ hosts = map(lambda x: x.split('!')[name_index], host_lines)
+ hostname = self._find_host_exhaustive(connector, hosts)
+
+ LOG.debug('leave: _get_host_from_connector: host %s.' % hostname)
+
+ return hostname
+
+ def _get_hostvdisk_mappings(self, host_name):
+ """Return the defined storage mappings for a host."""
+
+ return_data = {}
+ ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name]
+ out, err = self._ssh(ssh_cmd)
+
+ mappings = out.strip().split('\n')
+ if len(mappings):
+ header = mappings.pop(0)
+ for mapping_line in mappings:
+ mapping_data = self._get_hdr_dic(header, mapping_line, '!')
+ return_data[mapping_data['vdisk_name']] = mapping_data
+
+ return return_data
+
+ def _get_vdisk_attributes(self, vdisk_name):
+ """Return vdisk attributes
+
+ Exception is raised if the information from system can not be
+ parsed/matched to a single vdisk.
+ """
+
+ ssh_cmd = [
+ 'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_name]
+
+ return self._execute_command_and_parse_attributes(ssh_cmd)
+
+ def _get_vdiskhost_mappings(self, vdisk_name):
+ """Return the defined storage mappings for a vdisk."""
+
+ return_data = {}
+ ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name]
+ out, err = self._ssh(ssh_cmd)
+
+ mappings = out.strip().split('\n')
+ if len(mappings):
+ header = mappings.pop(0)
+ for mapping_line in mappings:
+ mapping_data = self._get_hdr_dic(header, mapping_line, '!')
+ return_data[mapping_data['host_name']] = mapping_data
+
+ return return_data
+
+ def _get_vdisk_map_properties(
+ self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params):
+ """Get the map properties of vdisk."""
+
+ LOG.debug(
+ 'enter: _get_vdisk_map_properties: vdisk '
+ '%(vdisk_name)s.' % {'vdisk_name': vdisk_name})
+
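+        # The driver always uses I/O group 0 and treats node '0' as the
+        # preferred node.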
+ preferred_node = '0'
+ IO_group = '0'
+
+ # Get preferred node and other nodes in I/O group
+ preferred_node_entry = None
+ io_group_nodes = []
+ for k, node in self._storage_nodes.iteritems():
+ if vdisk_params['protocol'] != node['protocol']:
+ continue
+ if node['id'] == preferred_node:
+ preferred_node_entry = node
+ if node['IO_group'] == IO_group:
+ io_group_nodes.append(node)
+
+ if not len(io_group_nodes):
+ msg = (_('_get_vdisk_map_properties: No node found in '
+ 'I/O group %(gid)s for volume %(vol)s.')
+ % {'gid': IO_group, 'vol': vdisk_name})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ if not preferred_node_entry and not vdisk_params['multipath']:
+ # Get 1st node in I/O group
+ preferred_node_entry = io_group_nodes[0]
+ LOG.warning(_LW('_get_vdisk_map_properties: Did not find a '
+ 'preferred node for vdisk %s.') % vdisk_name)
+ properties = {}
+ properties['target_discovered'] = False
+ properties['target_lun'] = lun_id
+ properties['volume_id'] = vdisk_id
+
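+ # Build FC connection data; the target WWPNs come from the
+ # storage nodes' FC ports.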
+ type_str = 'fibre_channel'
+ conn_wwpns = self._get_conn_fc_wwpns()
+
+ if len(conn_wwpns) == 0:
+ msg = (_('_get_vdisk_map_properties: Could not get FC '
+ 'connection information for the host-volume '
+ 'connection. Is the host configured properly '
+ 'for FC connections?'))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ properties['target_wwn'] = conn_wwpns
+
+ if "zvm_fcp" in connector:
+ properties['zvm_fcp'] = connector['zvm_fcp']
+
+ properties['initiator_target_map'] = self._build_initiator_target_map(
+ connector['wwpns'], conn_wwpns)
+
+ LOG.debug(
+ 'leave: _get_vdisk_map_properties: vdisk '
+ '%(vdisk_name)s.' % {'vdisk_name': vdisk_name})
+
+ return {'driver_volume_type': type_str, 'data': properties}
+
+ def _get_vdisk_params(self, type_id):
+ params = self._build_default_params()
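+ # Start from the driver defaults and override them with any
+ # volume type extra specs (keys may be scoped as '<scope>:<key>').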
+ if type_id:
+ ctxt = context.get_admin_context()
+ volume_type = volume_types.get_volume_type(ctxt, type_id)
+ specs = volume_type.get('extra_specs')
+ for k, value in specs.iteritems():
+ # Get the scope, if using scope format
+ key_split = k.split(':')
+ if len(key_split) == 1:
+ scope = None
+ key = key_split[0]
+ else:
+ scope = key_split[0]
+ key = key_split[1]
+
+ # We generally do not look at capabilities in the driver, but
+ # protocol is a special case where the user asks for a given
+ # protocol and we want both the scheduler and the driver to act
+ # on the value.
+ if ((not scope or scope == 'capabilities') and
+ key == 'storage_protocol'):
+ scope = None
+ key = 'protocol'
+
+ # Any keys that the driver should look at should have the
+ # 'drivers' scope.
+ if scope and scope != "drivers":
+ continue
+
+ if key in params:
+ this_type = type(params[key]).__name__
+ if this_type == 'int':
+ value = int(value)
+ elif this_type == 'bool':
+ value = strutils.bool_from_string(value)
+ params[key] = value
+
+ self._check_vdisk_params(params)
+
+ return params
+
+ def _handle_keyerror(self, function, header):
+ msg = (_('Did not find expected column in %(fun)s: %(hdr)s.')
+ % {'fun': function, 'hdr': header})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ def _is_vdisk_defined(self, vdisk_name):
+ """Check if vdisk is defined."""
+ LOG.debug('enter: _is_vdisk_defined: vdisk %s.' % vdisk_name)
+
+ vdisk_attributes = self._get_vdisk_attributes(vdisk_name)
+
+ LOG.debug(
+ 'leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s.'
+ % {'vol': vdisk_name, 'str': vdisk_attributes is not None})
+
+ if vdisk_attributes is None:
+ return False
+ else:
+ return True
+
+ def _is_vdisk_copy_in_progress(self, vdisk_name):
+ LOG.debug(
+ '_is_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.'
+ % {'vdisk': vdisk_name,
+ 'vdisk_in_progress':
+ six.text_type(self._vdisk_copy_in_progress)})
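+ # Polled by a FixedIntervalLoopingCall (see
+ # _wait_vdisk_copy_completed); raising LoopingCallDone stops the
+ # polling once the vdisk is no longer marked as copy-in-progress.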
+ if vdisk_name not in self._vdisk_copy_in_progress:
+ LOG.debug(
+ '_is_vdisk_copy_in_progress: '
+ 'vdisk copy is not in progress.')
+ raise loopingcall.LoopingCallDone(retvalue=True)
+
+ def _is_vdisk_map(self, vdisk_name, connector):
+ """Check if vdisk is mapped.
+
+ If mapped, return True and the current lun id.
+ If not mapped, return False and the expected lun id.
+
+ """
+
+ LOG.debug('enter: _is_vdisk_map: %(src)s.' % {'src': vdisk_name})
+
+ map_flag = False
+ result_lun = '-1'
+
+ host_name = self._get_host_from_connector(connector)
+ if host_name is None:
+ return (map_flag, int(result_lun))
+
+ mapping_data = self._get_hostvdisk_mappings(host_name)
+
+ if vdisk_name in mapping_data:
+ map_flag = True
+ result_lun = mapping_data[vdisk_name]['SCSI_id']
+ else:
+ lun_used = [int(v['SCSI_id']) for v in mapping_data.values()]
+ lun_used.sort()
+
+ # Start from 1 due to problems with lun id being 0.
+ result_lun = 1
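+ # Walk the sorted list of used lun ids and pick the lowest
+ # id >= 1 that is not already in use.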
+ for lun_id in lun_used:
+ if result_lun < lun_id:
+ break
+ elif result_lun == lun_id:
+ result_lun += 1
+
+ LOG.debug(
+ 'leave: _is_vdisk_map: %(src)s '
+ 'mapped %(map_flag)s %(result_lun)s.'
+ % {'src': vdisk_name,
+ 'map_flag': six.text_type(map_flag),
+ 'result_lun': result_lun})
+
+ return (map_flag, int(result_lun))
+
+ def _log_cli_output_error(self, function, cmd, out, err):
+ LOG.error(_LE('%(fun)s: Failed with unexpected CLI output.\n '
+ 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n')
+ % {'fun': function,
+ 'cmd': cmd,
+ 'out': six.text_type(out),
+ 'err': six.text_type(err)})
+
+ def _map_vdisk_to_host(self, vdisk_name, connector):
+ """Create a mapping between a vdisk to a host."""
+
+ LOG.debug(
+ 'enter: _map_vdisk_to_host: vdisk %(vdisk_name)s to '
+ 'host %(host)s.'
+ % {'vdisk_name': vdisk_name, 'host': connector})
+
+ # Check if a host object is defined for this host name
+ host_name = self._get_host_from_connector(connector)
+ if host_name is None:
+ # Host does not exist - add a new host to FlashSystem
+ host_name = self._create_host(connector)
+ # Verify that _create_host succeeded
+ self._driver_assert(
+ host_name is not None,
+ (_('_create_host failed to return the host name.')))
+
+ (map_flag, result_lun) = self._is_vdisk_map(vdisk_name, connector)
+
+ # Volume is not mapped to the host; create a new mapping
+ if not map_flag:
+ ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name,
+ '-scsi', six.text_type(result_lun), vdisk_name]
+ out, err = self._ssh(ssh_cmd, check_exit_code=False)
+ if err and err.startswith('CMMVC6071E'):
+ if not self.configuration.flashsystem_multihostmap_enabled:
+ msg = (_('flashsystem_multihostmap_enabled is set '
+ 'to False, so mapping a volume to multiple '
+ 'hosts is not allowed. CMMVC6071E The '
+ 'VDisk-to-host mapping was not created '
+ 'because the VDisk is already mapped to a host.'))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
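+ # Multi-host mapping is allowed: insert the -force flag right
+ # after the mkvdiskhostmap subcommand and run the command again.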
+ for i in range(len(ssh_cmd)):
+ if ssh_cmd[i] == 'mkvdiskhostmap':
+ ssh_cmd.insert(i + 1, '-force')
+
+ # Try again to map one volume to multiple hosts
+ out, err = self._ssh(ssh_cmd)
+ LOG.info(_LI('Volume %s is mapped to multiple hosts.')
+ % vdisk_name)
+ self._assert_ssh_return(
+ 'successfully created' in out,
+ '_map_vdisk_to_host', ssh_cmd, out, err)
+ else:
+ self._assert_ssh_return(
+ 'successfully created' in out,
+ '_map_vdisk_to_host', ssh_cmd, out, err)
+
+ LOG.debug(
+ ('leave: _map_vdisk_to_host: LUN %(result_lun)s, vdisk '
+ '%(vdisk_name)s, host %(host_name)s.')
+ % {'result_lun': result_lun,
+ 'vdisk_name': vdisk_name, 'host_name': host_name})
+
+ return int(result_lun)
+
+ def _port_conf_generator(self, cmd):
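+ """Yield the CLI header line, then one parsed dict per port line."""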
+ ssh_cmd = cmd + ['-delim', '!']
+ out, err = self._ssh(ssh_cmd)
+
+ if not len(out.strip()):
+ return
+ port_lines = out.strip().split('\n')
+ if not len(port_lines):
+ return
+
+ header = port_lines.pop(0)
+ yield header
+ for portip_line in port_lines:
+ try:
+ port_data = self._get_hdr_dic(header, portip_line, '!')
+ except exception.VolumeBackendAPIException:
+ with excutils.save_and_reraise_exception():
+ self._log_cli_output_error('_port_conf_generator',
+ ssh_cmd, out, err)
+ yield port_data
+
+ def _remove_device(self, properties, device):
+ LOG.debug('enter: _remove_device')
+
+ if not properties or not device:
+ LOG.warning(_LW('_remove_device: invalid properties or device.'))
+ return
+
+ use_multipath = self.configuration.use_multipath_for_image_xfer
+ device_scan_attempts = self.configuration.num_volume_device_scan_tries
+ protocol = properties['driver_volume_type']
+ connector = utils.brick_get_connector(
+ protocol,
+ use_multipath=use_multipath,
+ device_scan_attempts=device_scan_attempts,
+ conn=properties)
+
+ connector.disconnect_volume(properties['data'], device)
+
+ LOG.debug('leave: _remove_device')
+
+ def _scan_device(self, properties):
+ LOG.debug('enter: _scan_device')
+
+ use_multipath = self.configuration.use_multipath_for_image_xfer
+ device_scan_attempts = self.configuration.num_volume_device_scan_tries
+ protocol = properties['driver_volume_type']
+ connector = utils.brick_get_connector(
+ protocol,
+ use_multipath=use_multipath,
+ device_scan_attempts=device_scan_attempts,
+ conn=properties)
+ device = connector.connect_volume(properties['data'])
+ host_device = device['path']
+
+ if not connector.check_valid_device(host_device):
+ msg = (_('Unable to access the backend storage '
+ 'via the path %(path)s.') % {'path': host_device})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ LOG.debug('leave: _scan_device')
+ return device
+
+ def _unmap_vdisk_from_host(self, vdisk_name, connector):
+ if 'host' in connector:
+ host_name = self._get_host_from_connector(connector)
+ self._driver_assert(
+ host_name is not None,
+ (_('_get_host_from_connector failed to return the host name '
+ 'for connector.')))
+ else:
+ host_name = None
+
+ # Check if vdisk-host mapping exists, remove if it does. If no host
+ # name was given, but only one mapping exists, we can use that.
+ mapping_data = self._get_vdiskhost_mappings(vdisk_name)
+ if len(mapping_data) == 0:
+ LOG.warning(_LW('_unmap_vdisk_from_host: No mapping of volume '
+ '%(vol_name)s to any host found.')
+ % {'vol_name': vdisk_name})
+ return
+ if host_name is None:
+ if len(mapping_data) > 1:
+ LOG.warning(_LW('_unmap_vdisk_from_host: Multiple mappings of '
+ 'volume %(vdisk_name)s found, no host '
+ 'specified.')
+ % {'vdisk_name': vdisk_name})
+ return
+ else:
+ host_name = mapping_data.keys()[0]
+ else:
+ if host_name not in mapping_data:
+ LOG.error(_LE('_unmap_vdisk_from_host: No mapping of volume '
+ '%(vol_name)s to host %(host_name)s found.')
+ % {'vol_name': vdisk_name, 'host_name': host_name})
+ return
+
+ # We have a valid host_name now
+ ssh_cmd = ['svctask', 'rmvdiskhostmap',
+ '-host', host_name, vdisk_name]
+ out, err = self._ssh(ssh_cmd)
+ # Verify CLI behaviour - no output is returned from rmvdiskhostmap
+ self._assert_ssh_return(
+ len(out.strip()) == 0,
+ '_unmap_vdisk_from_host', ssh_cmd, out, err)
+
+ # If this host has no more mappings, delete it
+ mapping_data = self._get_hostvdisk_mappings(host_name)
+ if not mapping_data:
+ self._delete_host(host_name)
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from volume group."""
+
+ LOG.debug("Updating volume stats.")
+ data = {}
+
+ data['vendor_name'] = 'IBM'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = self._protocol
+
+ data['total_capacity_gb'] = 0
+ data['free_capacity_gb'] = 0
+ data['reserved_percentage'] = self.configuration.reserved_percentage
+ data['QoS_support'] = False
+
+ pool = FLASHSYSTEM_VOLPOOL_NAME
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ if not backend_name:
+ backend_name = '%s_%s' % (self._system_name, pool)
+ data['volume_backend_name'] = backend_name
+
+ ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
+ attributes = self._execute_command_and_parse_attributes(ssh_cmd)
+ if not attributes:
+ msg = _('_update_volume_stats: Could not get storage pool data.')
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
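+ # lsmdiskgrp was run with -bytes, so convert capacities to GiB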
+ data['total_capacity_gb'] = (
+ float(attributes['capacity']) / units.Gi)
+ data['free_capacity_gb'] = (
+ float(attributes['free_capacity']) / units.Gi)
+ data['easytier_support'] = False # Do not support easy tier
+ data['location_info'] = (
+ 'FlashSystemDriver:%(sys_id)s:%(pool)s'
+ % {'sys_id': self._system_id, 'pool': pool})
+
+ self._stats = data
+
+ def _set_vdisk_copy_in_progress(self, vdisk_list):
+ LOG.debug(
+ '_set_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.'
+ % {'vdisk': six.text_type(vdisk_list),
+ 'vdisk_in_progress':
+ six.text_type(self._vdisk_copy_in_progress)})
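+ # Try to mark every vdisk in the list as copy-in-progress while
+ # holding the lock; if any is already marked, release and let the
+ # looping call retry. LoopingCallDone ends the wait on success.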
+ get_lock = True
+ self._vdisk_copy_lock.acquire()
+ for vdisk in vdisk_list:
+ if vdisk in self._vdisk_copy_in_progress:
+ get_lock = False
+ break
+ if get_lock:
+ self._vdisk_copy_in_progress.update(vdisk_list)
+ self._vdisk_copy_lock.release()
+ if get_lock:
+ LOG.debug(
+ '_set_vdisk_copy_in_progress: %s.'
+ % six.text_type(self._vdisk_copy_in_progress))
+ raise loopingcall.LoopingCallDone(retvalue=True)
+
+ def _unset_vdisk_copy_in_progress(self, vdisk_list):
+ LOG.debug(
+ '_unset_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.'
+ % {'vdisk': six.text_type(vdisk_list),
+ 'vdisk_in_progress':
+ six.text_type(self._vdisk_copy_in_progress)})
+ self._vdisk_copy_lock.acquire()
+ for vdisk in vdisk_list:
+ if vdisk in self._vdisk_copy_in_progress:
+ self._vdisk_copy_in_progress.remove(vdisk)
+ self._vdisk_copy_lock.release()
+
+ def _wait_vdisk_copy_completed(self, vdisk_name):
+ timer = loopingcall.FixedIntervalLoopingCall(
+ self._is_vdisk_copy_in_progress, vdisk_name)
+ timer.start(interval=self._check_lock_interval).wait()
+ timer.stop()
+
+ def do_setup(self, ctxt):
+ """Check that we have all configuration details from the storage."""
+
+ LOG.debug('enter: do_setup')
+
+ self._context = ctxt
+
+ # Get storage system name and id
+ ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
+ attributes = self._execute_command_and_parse_attributes(ssh_cmd)
+ if not attributes or not ('name' in attributes):
+ msg = (_('do_setup: Could not get system name.'))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ self._system_name = attributes['name']
+ self._system_id = attributes['id']
+
+ # Validate the open_access_enabled flag; for now the driver only
+ # supports systems with open_access_enabled set to off
+ if not attributes or not ('open_access_enabled' in attributes) or (
+ attributes['open_access_enabled'] != 'off'):
+ msg = (_('do_setup: open_access_enabled is not off.'))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Validate that the array exists
+ pool = FLASHSYSTEM_VOLPOOL_NAME
+ ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
+ attributes = self._execute_command_and_parse_attributes(ssh_cmd)
+ if not attributes or not ('status' in attributes) or (
+ attributes['status'] == 'offline'):
+ msg = (_('do_setup: Array does not exist or is offline.'))
+ LOG.error(msg)
+ raise exception.InvalidInput(reason=msg)
+
+ # Get the FC names of the FlashSystem nodes
+ ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
+ out, err = self._ssh(ssh_cmd)
+ self._assert_ssh_return(
+ len(out.strip()), 'do_setup', ssh_cmd, out, err)
+
+ nodes = out.strip().splitlines()
+ self._assert_ssh_return(len(nodes), 'do_setup', ssh_cmd, out, err)
+ header = nodes.pop(0)
+ for node_line in nodes:
+ try:
+ node_data = self._get_hdr_dic(header, node_line, '!')
+ except exception.VolumeBackendAPIException:
+ with excutils.save_and_reraise_exception():
+ self._log_cli_output_error('do_setup', ssh_cmd, out, err)
+ node = {}
+ try:
+ node['id'] = node_data['id']
+ node['name'] = node_data['name']
+ node['IO_group'] = node_data['IO_group_id']
+ node['WWNN'] = node_data['WWNN']
+ node['status'] = node_data['status']
+ node['WWPN'] = []
+ node['protocol'] = None
+ if node['status'] == 'online':
+ self._storage_nodes[node['id']] = node
+ except KeyError:
+ self._handle_keyerror('lsnode', header)
+
+ # Get the WWPNs of the FlashSystem nodes
+ self._get_fc_wwpns()
+
+ # Drop any nodes that have no FC WWPNs; they may be only partially
+ # configured.
+ to_delete = []
+ for k, node in self._storage_nodes.iteritems():
+ if not len(node['WWPN']):
+ to_delete.append(k)
+
+ for delkey in to_delete:
+ del self._storage_nodes[delkey]
+
+ # Make sure we have at least one node configured
+ self._driver_assert(
+ len(self._storage_nodes),
+ 'do_setup: No configured nodes.')
+
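+ # This driver supports the FC protocol only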
+ self._protocol = node['protocol'] = 'FC'
+
+ # Set for vdisk synchronization
+ self._vdisk_copy_in_progress = set()
+ self._vdisk_copy_lock = threading.Lock()
+ self._check_lock_interval = 5
+
+ LOG.debug('leave: do_setup')
+
+ def check_for_setup_error(self):
+ """Ensure that the flags are set properly."""
+ LOG.debug('enter: check_for_setup_error')
+
+ # Check that we have the system ID information
+ if self._system_name is None:
+ msg = (
+ _('check_for_setup_error: Unable to determine system name.'))
+ raise exception.VolumeBackendAPIException(data=msg)
+ if self._system_id is None:
+ msg = (_('check_for_setup_error: Unable to determine system id.'))
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ required_flags = ['san_ip', 'san_ssh_port', 'san_login']
+ for flag in required_flags:
+ if not self.configuration.safe_get(flag):
+ msg = (_('%s is not set.') % flag)
+ raise exception.InvalidInput(reason=msg)
+
+ # Ensure that either a password or an SSH private key was set
+ if not (self.configuration.san_password or
+ self.configuration.san_private_key):
+ msg = (_('check_for_setup_error: Password or SSH private key '
+ 'is required for authentication: set either '
+ 'san_password or san_private_key option.'))
+ raise exception.InvalidInput(reason=msg)
+
+ params = self._build_default_params()
+ self._check_vdisk_params(params)
+
+ LOG.debug('leave: check_for_setup_error')
+
+ def validate_connector(self, connector):
+ """Check connector."""
+ if 'FC' != self._protocol or 'wwpns' not in connector:
+ msg = (_('The connector does not contain the '
+ 'required information.'))
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+
+ def create_volume(self, volume):
+ """Create volume."""
+ vdisk_name = volume['name']
+ vdisk_params = self._get_vdisk_params(volume['volume_type_id'])
+ vdisk_size = six.text_type(volume['size'])
+ return self._create_vdisk(vdisk_name, vdisk_size, 'gb', vdisk_params)
+
+ def delete_volume(self, volume):
+ """Delete volume."""
+ vdisk_name = volume['name']
+ self._wait_vdisk_copy_completed(vdisk_name)
+ self._delete_vdisk(vdisk_name, False)
+
+ def extend_volume(self, volume, new_size):
+ """Extend volume."""
+ LOG.debug('enter: extend_volume: volume %s.' % volume['name'])
+
+ vdisk_name = volume['name']
+ self._wait_vdisk_copy_completed(vdisk_name)
+
+ extend_amt = int(new_size) - volume['size']
+ ssh_cmd = (['svctask', 'expandvdisksize', '-size',
+ six.text_type(extend_amt), '-unit', 'gb', vdisk_name])
+ out, err = self._ssh(ssh_cmd)
+ # No output should be returned from expandvdisksize
+ self._assert_ssh_return(
+ len(out.strip()) == 0,
+ 'extend_volume', ssh_cmd, out, err)
+
+ LOG.debug('leave: extend_volume: volume %s.' % volume['name'])
+
+ @fczm_utils.AddFCZone
+ def initialize_connection(self, volume, connector):
+ """Perform the necessary work so that a FC connection can
+ be made.
+
+ To be able to create a FC connection from a given host to a
+ volume, we must:
+ 1. Translate the given WWNN to a host name
+ 2. Create new host on the storage system if it does not yet exist
+ 3. Map the volume to the host if it is not already done
+ 4. Return the connection information for relevant nodes (in the
+ proper I/O group)
+
+ """
+
+ LOG.debug(
+ 'enter: initialize_connection: volume %(vol)s with '
+ 'connector %(conn)s.' % {'vol': volume, 'conn': connector})
+
+ vdisk_name = volume['name']
+ vdisk_id = volume['id']
+ vdisk_params = self._get_vdisk_params(volume['volume_type_id'])
+
+ self._wait_vdisk_copy_completed(vdisk_name)
+
+ self._driver_assert(
+ self._is_vdisk_defined(vdisk_name),
+ (_('initialize_connection: vdisk %s is not defined.')
+ % vdisk_name))
+
+ lun_id = self._map_vdisk_to_host(vdisk_name, connector)
+
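+ # Collect the connection properties; if that fails, undo the
+ # mapping created above before re-raising.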
+ properties = {}
+ try:
+ properties = self._get_vdisk_map_properties(
+ connector, lun_id, vdisk_name, vdisk_id, vdisk_params)
+ except exception.VolumeBackendAPIException:
+ with excutils.save_and_reraise_exception():
+ self.terminate_connection(volume, connector)
+ LOG.error(_LE('initialize_connection: Failed to collect '
+ 'return properties for volume %(vol)s and '
+ 'connector %(conn)s.')
+ % {'vol': volume, 'conn': connector})
+
+ LOG.debug(
+ 'leave: initialize_connection:\n volume: %(vol)s\n connector '
+ '%(conn)s\n properties: %(prop)s.'
+ % {'vol': volume,
+ 'conn': connector,
+ 'prop': properties})
+
+ return properties
+
+ @fczm_utils.RemoveFCZone
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Cleanup after connection has been terminated.
+
+ When we clean up a terminated connection between a given connector
+ and volume, we:
+ 1. Translate the given connector to a host name
+ 2. Remove the volume-to-host mapping if it exists
+ 3. Delete the host if it has no more mappings (hosts are created
+ automatically by this driver when mappings are created)
+ """
+ LOG.debug(
+ 'enter: terminate_connection: volume %(vol)s with '
+ 'connector %(conn)s.'
+ % {'vol': volume, 'conn': connector})
+
+ vdisk_name = volume['name']
+ self._wait_vdisk_copy_completed(vdisk_name)
+ self._unmap_vdisk_from_host(vdisk_name, connector)
+
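+ # Return the target WWPNs and initiator-target map so that the
+ # RemoveFCZone decorator can tear down the FC zoning.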
+ properties = {}
+ conn_wwpns = self._get_conn_fc_wwpns()
+ properties['target_wwn'] = conn_wwpns
+ properties['initiator_target_map'] = self._build_initiator_target_map(
+ connector['wwpns'], conn_wwpns)
+
+ LOG.debug(
+ 'leave: terminate_connection: volume %(vol)s with '
+ 'connector %(conn)s.' % {'vol': volume, 'conn': connector})
+
+ return {
+ 'driver_volume_type': 'fibre_channel',
+ 'data': properties
+ }
+
+ def create_snapshot(self, snapshot):
+ """Create snapshot from volume."""
+
+ LOG.debug(
+ 'enter: create_snapshot: create %(snap)s from %(vol)s.'
+ % {'snap': snapshot['name'], 'vol': snapshot['volume']['name']})
+
+ status = snapshot['volume']['status']
+ if status not in ['available', 'in-use']:
+ msg = (_(
+ 'create_snapshot: Volume status must be "available" or '
+ '"in-use" for snapshot. The invalid status is %s.') % status)
+ raise exception.InvalidVolume(msg)
+
+ self._create_and_copy_vdisk_data(snapshot['volume']['name'],
+ snapshot['volume']['id'],
+ snapshot['name'],
+ snapshot['id'])
+
+ LOG.debug(
+ 'leave: create_snapshot: create %(snap)s from %(vol)s.'
+ % {'snap': snapshot['name'], 'vol': snapshot['volume']['name']})
+
+ def delete_snapshot(self, snapshot):
+ """Delete snapshot."""
+
+ LOG.debug(
+ 'enter: delete_snapshot: delete %(snap)s.'
+ % {'snap': snapshot['name']})
+
+ self._wait_vdisk_copy_completed(snapshot['name'])
+
+ self._delete_vdisk(snapshot['name'], False)
+
+ LOG.debug(
+ 'leave: delete_snapshot: delete %(snap)s.'
+ % {'snap': snapshot['name']})
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Create volume from snapshot."""
+
+ LOG.debug(
+ 'enter: create_volume_from_snapshot: create %(vol)s from '
+ '%(snap)s.' % {'vol': volume['name'], 'snap': snapshot['name']})
+
+ if volume['size'] != snapshot['volume_size']:
+ msg = (_('create_volume_from_snapshot: Volume size differs '
+ 'from the snapshot size.'))
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+
+ status = snapshot['status']
+ if status != 'available':
+ msg = (_('create_volume_from_snapshot: Snapshot status '
+ 'must be "available" for creating volume. '
+ 'The invalid status is: %s.') % status)
+ raise exception.InvalidSnapshot(msg)
+
+ self._create_and_copy_vdisk_data(snapshot['name'],
+ snapshot['id'],
+ volume['name'],
+ volume['id'])
+
+ LOG.debug(
+ 'leave: create_volume_from_snapshot: create %(vol)s from '
+ '%(snap)s.' % {'vol': volume['name'], 'snap': snapshot['name']})
+
+ def create_cloned_volume(self, volume, src_volume):
+ """Create volume from a source volume."""
+
+ LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.'
+ % {'src': src_volume['name'], 'vol': volume['name']})
+
+ if src_volume['size'] != volume['size']:
+ msg = (_('create_cloned_volume: Source and destination '
+ 'size differ.'))
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+
+ self._create_and_copy_vdisk_data(src_volume['name'],
+ src_volume['id'],
+ volume['name'],
+ volume['id'])
+
+ LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.'
+ % {'src': src_volume['name'], 'vol': volume['name']})
+
+ def get_volume_stats(self, refresh=False):
+ """Get volume stats.
+
+ If we haven't gotten stats yet or 'refresh' is True,
+ update the stats first.
+ """
+ if not self._stats or refresh:
+ self._update_volume_stats()
+
+ return self._stats