self._hosts_list = {}
self._mappings_list = {}
self._fcmappings_list = {}
+ self._fcconsistgrp_list = {}
self._other_pools = {'openstack2': {}, 'openstack3': {}}
self._next_cmd_error = {
'lsportip': '',
'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported '
'parameter.'),
}
- self._transitions = {'begin': {'make': 'idle_or_copied'},
- 'idle_or_copied': {'prepare': 'preparing',
- 'delete': 'end',
- 'delete_force': 'end'},
- 'preparing': {'flush_failed': 'stopped',
- 'wait': 'prepared'},
- 'end': None,
- 'stopped': {'prepare': 'preparing',
- 'delete_force': 'end'},
- 'prepared': {'stop': 'stopped',
- 'start': 'copying'},
- 'copying': {'wait': 'idle_or_copied',
- 'stop': 'stopping'},
- # Assume the worst case where stopping->stopped
- # rather than stopping idle_or_copied
- 'stopping': {'wait': 'stopped'},
- }
+ self._fc_transitions = {'begin': {'make': 'idle_or_copied'},
+ 'idle_or_copied': {'prepare': 'preparing',
+ 'delete': 'end',
+ 'delete_force': 'end'},
+ 'preparing': {'flush_failed': 'stopped',
+ 'wait': 'prepared'},
+ 'end': None,
+ 'stopped': {'prepare': 'preparing',
+ 'delete_force': 'end'},
+ 'prepared': {'stop': 'stopped',
+ 'start': 'copying'},
+ 'copying': {'wait': 'idle_or_copied',
+ 'stop': 'stopping'},
+ # Assume the worst case where stopping->stopped
+ # rather than stopping idle_or_copied
+ 'stopping': {'wait': 'stopped'},
+ }
+
+ self._fc_cg_transitions = {'begin': {'make': 'empty'},
+ 'empty': {'add': 'idle_or_copied'},
+ 'idle_or_copied': {'prepare': 'preparing',
+ 'delete': 'end',
+ 'delete_force': 'end'},
+ 'preparing': {'flush_failed': 'stopped',
+ 'wait': 'prepared'},
+ 'end': None,
+ 'stopped': {'prepare': 'preparing',
+ 'delete_force': 'end'},
+ 'prepared': {'stop': 'stopped',
+ 'start': 'copying',
+ 'delete_force': 'end',
+ 'delete': 'end'},
+ 'copying': {'wait': 'idle_or_copied',
+ 'stop': 'stopping',
+ 'delete_force': 'end',
+ 'delete': 'end'},
+ # Assume the case where stopping->stopped
+ # rather than stopping idle_or_copied
+ 'stopping': {'wait': 'stopped'},
+ }
def _state_transition(self, function, fcmap):
if (function == 'wait' and
- 'wait' not in self._transitions[fcmap['status']]):
+ 'wait' not in self._fc_transitions[fcmap['status']]):
return ('', '')
if fcmap['status'] == 'copying' and function == 'wait':
else:
try:
curr_state = fcmap['status']
- fcmap['status'] = self._transitions[curr_state][function]
+ fcmap['status'] = self._fc_transitions[curr_state][function]
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
+ def _fc_cg_state_transition(self, function, fc_consistgrp):
+ if (function == 'wait' and
+ 'wait' not in self._fc_transitions[fc_consistgrp['status']]):
+ return ('', '')
+
+ try:
+ curr_state = fc_consistgrp['status']
+ fc_consistgrp['status'] \
+ = self._fc_cg_transitions[curr_state][function]
+ return ('', '')
+ except Exception:
+ return self._errors['CMMVC5903E']
+
# Find an unused ID
@staticmethod
def _find_unused_id(d):
'vdisk',
'warning',
'wwpn',
- 'primary'
+ 'primary',
+ 'consistgrp'
]
no_or_one_param_args = [
'autoexpand',
if (self._volumes_list[source]['capacity'] !=
self._volumes_list[target]['capacity']):
- return self._errors['CMMVC5924E']
+ return self._errors['CMMVC5754E']
fcmap_info = {}
fcmap_info['source'] = source
fcmap_info['progress'] = '0'
fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False
fcmap_info['status'] = 'idle_or_copied'
+
+ # Add fcmap to consistency group
+ if 'consistgrp' in kwargs:
+ consistgrp = kwargs['consistgrp']
+
+ # if is digit, assume is cg id, else is cg name
+ cg_id = 0
+ if not consistgrp.isdigit():
+ for consistgrp_key in self._fcconsistgrp_list.keys():
+ if (self._fcconsistgrp_list[consistgrp_key]['name']
+ == consistgrp):
+ cg_id = consistgrp_key
+ fcmap_info['consistgrp'] = consistgrp_key
+ break
+ else:
+ if int(consistgrp) in self._fcconsistgrp_list.keys():
+ cg_id = int(consistgrp)
+
+ # If can't find exist consistgrp id, return not exist error
+ if not cg_id:
+ return self._errors['CMMVC5754E']
+
+ fcmap_info['consistgrp'] = cg_id
+ # Add fcmap to consistgrp
+ self._fcconsistgrp_list[cg_id]['fcmaps'][fcmap_info['id']] = (
+ fcmap_info['name'])
+ self._fc_cg_state_transition('add',
+ self._fcconsistgrp_list[cg_id])
+
self._fcmappings_list[fcmap_info['id']] = fcmap_info
return('FlashCopy Mapping, id [' + fcmap_info['id'] +
return self._print_info_cmd(rows=rows, **kwargs)
+ # Create a FlashCopy mapping
+ def _cmd_mkfcconsistgrp(self, **kwargs):
+ fcconsistgrp_info = {}
+ fcconsistgrp_info['id'] = self._find_unused_id(self._fcconsistgrp_list)
+
+ if 'name' in kwargs:
+ fcconsistgrp_info['name'] = kwargs['name'].strip('\'\"')
+ else:
+ fcconsistgrp_info['name'] = 'fccstgrp' + fcconsistgrp_info['id']
+
+ if 'autodelete' in kwargs:
+ fcconsistgrp_info['autodelete'] = True
+ else:
+ fcconsistgrp_info['autodelete'] = False
+ fcconsistgrp_info['status'] = 'empty'
+ fcconsistgrp_info['start_time'] = None
+ fcconsistgrp_info['fcmaps'] = {}
+
+ self._fcconsistgrp_list[fcconsistgrp_info['id']] = fcconsistgrp_info
+
+ return('FlashCopy Consistency Group, id [' + fcconsistgrp_info['id'] +
+ '], successfully created', '')
+
+ def _cmd_prestartfcconsistgrp(self, **kwargs):
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC5701E']
+ cg_name = kwargs['obj']
+
+ cg_id = 0
+ for cg_id in self._fcconsistgrp_list.keys():
+ if cg_name == self._fcconsistgrp_list[cg_id]['name']:
+ break
+
+ return self._fc_cg_state_transition('prepare',
+ self._fcconsistgrp_list[cg_id])
+
+ def _cmd_startfcconsistgrp(self, **kwargs):
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC5701E']
+ cg_name = kwargs['obj']
+
+ cg_id = 0
+ for cg_id in self._fcconsistgrp_list.keys():
+ if cg_name == self._fcconsistgrp_list[cg_id]['name']:
+ break
+
+ return self._fc_cg_state_transition('start',
+ self._fcconsistgrp_list[cg_id])
+
+ def _cmd_stopfcconsistgrp(self, **kwargs):
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC5701E']
+ id_num = kwargs['obj']
+
+ try:
+ fcconsistgrps = self._fcconsistgrp_list[id_num]
+ except KeyError:
+ return self._errors['CMMVC5753E']
+
+ return self._fc_cg_state_transition('stop', fcconsistgrps)
+
+ def _cmd_rmfcconsistgrp(self, **kwargs):
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC5701E']
+ cg_name = kwargs['obj']
+ force = True if 'force' in kwargs else False
+
+ cg_id = 0
+ for cg_id in self._fcconsistgrp_list.keys():
+ if cg_name == self._fcconsistgrp_list[cg_id]['name']:
+ break
+ if not cg_id:
+ return self._errors['CMMVC5753E']
+ fcconsistgrps = self._fcconsistgrp_list[cg_id]
+
+ function = 'delete_force' if force else 'delete'
+ ret = self._fc_cg_state_transition(function, fcconsistgrps)
+ if fcconsistgrps['status'] == 'end':
+ del self._fcconsistgrp_list[cg_id]
+ return ret
+
+ def _cmd_lsfcconsistgrp(self, **kwargs):
+ rows = []
+
+ if 'obj' not in kwargs:
+ rows.append(['id', 'name', 'status' 'start_time'])
+
+ for fcconsistgrp in self._fcconsistgrp_list.itervalues():
+ rows.append([fcconsistgrp['id'],
+ fcconsistgrp['name'],
+ fcconsistgrp['status'],
+ fcconsistgrp['start_time']])
+ return self._print_info_cmd(rows=rows, **kwargs)
+ else:
+ fcconsistgrp = None
+ cg_id = 0
+ for cg_id in self._fcconsistgrp_list.keys():
+ if self._fcconsistgrp_list[cg_id]['name'] == kwargs['obj']:
+ fcconsistgrp = self._fcconsistgrp_list[cg_id]
+ rows = []
+ rows.append(['id', str(cg_id)])
+ rows.append(['name', fcconsistgrp['name']])
+ rows.append(['status', fcconsistgrp['status']])
+ rows.append(['autodelete', str(fcconsistgrp['autodelete'])])
+ rows.append(['start_time', str(fcconsistgrp['start_time'])])
+
+ for fcmap_id in fcconsistgrp['fcmaps'].keys():
+ rows.append(['FC_mapping_id', str(fcmap_id)])
+ rows.append(['FC_mapping_name',
+ fcconsistgrp['fcmaps'][fcmap_id]])
+
+ if 'delim' in kwargs:
+ for index in range(len(rows)):
+ rows[index] = kwargs['delim'].join(rows[index])
+ self._fc_cg_state_transition('wait', fcconsistgrp)
+ return ('%s' % '\n'.join(rows), '')
+
def _cmd_migratevdisk(self, **kwargs):
if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs:
return self._errors['CMMVC5707E']
self.driver.delete_volume(volume)
self.db.volume_destroy(self.ctxt, volume['id'])
def _create_consistencygroup_in_db(self, **kwargs):
    """Insert a consistency-group row directly into the test DB."""
    return testutils.create_consistencygroup(self.ctxt, **kwargs)
+
def _create_cgsnapshot_in_db(self, cg_id, **kwargs):
    """Insert a cgsnapshot row plus one snapshot row per CG volume.

    :raises exception.InvalidConsistencyGroup: when the group contains
        no volumes, since an empty group cannot produce a cgsnapshot.
    """
    cg_snapshot = testutils.create_cgsnapshot(self.ctxt,
                                              consistencygroup_id=cg_id,
                                              **kwargs)

    cg_id = cg_snapshot['consistencygroup_id']
    volumes = self.db.volume_get_all_by_group(self.ctxt.elevated(), cg_id)

    if not volumes:
        msg = _("Consistency group is empty. No cgsnapshot "
                "will be created.")
        raise exception.InvalidConsistencyGroup(reason=msg)

    for volume in volumes:
        # NOTE(review): positional args map cg_snapshot['id'] to
        # display_name and cg_snapshot['name'] to display_description
        # in testutils.create_snapshot -- confirm that is intended.
        testutils.create_snapshot(self.ctxt,
                                  volume['id'],
                                  cg_snapshot['id'],
                                  cg_snapshot['name'],
                                  cg_snapshot['id'],
                                  "creating")

    return cg_snapshot
+
def _create_test_vol(self, opts):
ctxt = testutils.get_test_admin_context()
type_ref = volume_types.create(ctxt, 'testtype', opts)
self.assertEqual(term_data, term_ret)
def test_storwize_consistency_group_snapshot(self):
    """End-to-end CG flow: create CG, add volumes, snapshot, delete."""
    cg_type = self._create_consistency_group_volume_type()

    cg = self._create_consistencygroup_in_db(volume_type_id=cg_type['id'])

    model_update = self.driver.create_consistencygroup(self.ctxt, cg)

    # (expected, observed) ordering, consistent with the other asserts
    # in this test (the original had the arguments swapped here).
    self.assertEqual('available',
                     model_update['status'],
                     "CG created failed")
    # Add volumes to CG
    for _i in range(3):
        self._create_volume(volume_type_id=cg_type['id'],
                            consistencygroup_id=cg['id'])
    cg_snapshot = self._create_cgsnapshot_in_db(cg['id'])

    model_update = self.driver.create_cgsnapshot(self.ctxt, cg_snapshot)
    self.assertEqual('available',
                     model_update[0]['status'],
                     "CGSnapshot created failed")

    for snapshot in model_update[1]:
        self.assertEqual('available', snapshot['status'])

    model_update = self.driver.delete_consistencygroup(self.ctxt, cg)

    self.assertEqual('deleted', model_update[0]['status'])
    for volume in model_update[1]:
        self.assertEqual('deleted', volume['status'])
+
def _create_volume_type_qos(self, extra_specs, fake_qos):
# Generate a QoS volume type for volume.
if extra_specs:
return replication_type
def _create_consistency_group_volume_type(self):
    """Build and return a volume type that advertises CG support."""
    spec = {'capabilities:consistencygroup_support': '<is> True'}
    type_ref = volume_types.create(self.ctxt, "cg", spec)
    return volume_types.get_volume_type(self.ctxt, type_ref['id'])
+
def _get_vdisk_uid(self, vdisk_name):
"""Return vdisk_UID for given vdisk.
volume_id,
display_name='test_snapshot',
display_description='this is a test snapshot',
+ cgsnapshot_id = None,
status='creating'):
vol = db.volume_get(ctxt, volume_id)
snap = {}
snap['volume_size'] = vol['size']
snap['display_name'] = display_name
snap['display_description'] = display_description
+ snap['cgsnapshot_id'] = cgsnapshot_id
return db.snapshot_create(ctxt, snap)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
+ 1.3.2 - Added support for consistency group
"""
- VERSION = "1.3.1"
+ VERSION = "1.3.2"
VDISKCOPYOPS_INTERVAL = 600
def __init__(self, *args, **kwargs):
return self._stats
def create_consistencygroup(self, context, group):
    """Create a consistency group.

    IBM Storwize will create CG until cg-snapshot creation,
    db will maintain the volumes and CG relationship.
    """
    LOG.debug("Creating consistency group")
    return {'status': 'available'}
+
def delete_consistencygroup(self, context, group):
    """Deletes a consistency group.

    IBM Storwize will delete the volumes of the CG.
    """
    LOG.debug("deleting consistency group")
    model_update = {'status': 'deleted'}
    volumes = self.db.volume_get_all_by_group(context, group['id'])

    for volume in volumes:
        try:
            self._helpers.delete_vdisk(volume['name'], True)
            volume['status'] = 'deleted'
        except exception.VolumeBackendAPIException as err:
            # Keep deleting the remaining volumes; a single failure
            # downgrades the whole group's status.
            volume['status'] = 'error_deleting'
            model_update['status'] = 'error_deleting'
            LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
                          "Exception: %(exception)s."),
                      {'vol': volume['name'], 'exception': err})
    return model_update, volumes
+
def create_cgsnapshot(self, ctxt, cgsnapshot):
    """Creates a cgsnapshot."""
    # Use cgsnapshot id as cg name
    cg_name = 'cg_snap-' + cgsnapshot['id']
    # Create new cg as cg_snapshot
    self._helpers.create_fc_consistgrp(cg_name)

    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
        ctxt, cgsnapshot['id'])
    timeout = self.configuration.storwize_svc_flashcopy_timeout

    # Returns (cgsnapshot model update, per-snapshot model updates).
    return self._helpers.run_consistgrp_snapshots(cg_name,
                                                  snapshots,
                                                  self._state,
                                                  self.configuration,
                                                  timeout)
+
def delete_cgsnapshot(self, context, cgsnapshot):
    """Deletes a cgsnapshot."""
    # The cg name mirrors the one built in create_cgsnapshot.
    cg_name = 'cg_snap-' + cgsnapshot['id']

    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
        context, cgsnapshot['id'])

    return self._helpers.delete_consistgrp_snapshots(cg_name, snapshots)
+
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data['free_capacity_gb'] = 0 # To be overwritten
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
+ data['consistencygroup_support'] = True
pool = self.configuration.storwize_svc_volpool_name
backend_name = self.configuration.safe_get('volume_backend_name')
import random
import re
+import time
import unicodedata
+
from eventlet import greenthread
from oslo.utils import excutils
from oslo.utils import strutils
from cinder.volume import utils
from cinder.volume import volume_types
+INTERVAL_1_SEC = 1
+DEFAULT_TIMEOUT = 15
LOG = logging.getLogger(__name__)
continue
return qos
def _wait_for_a_condition(self, testmethod, timeout=None,
                          interval=INTERVAL_1_SEC):
    """Poll ``testmethod`` until it returns truthy or ``timeout`` passes.

    :param testmethod: zero-argument callable; a truthy result stops
        the loop.  Exceptions it raises are logged and treated as False.
    :param timeout: seconds before giving up (DEFAULT_TIMEOUT if None).
    :param interval: seconds between polls.
    :raises exception.VolumeBackendAPIException: on timeout.
    """
    start_time = time.time()
    if timeout is None:
        timeout = DEFAULT_TIMEOUT

    def _inner():
        try:
            test_value = testmethod()
        except Exception as ex:
            test_value = False
            # Format the exception itself rather than ex.message: the
            # attribute was removed in Python 3 and deprecated in 2.6+.
            LOG.debug('Helper.'
                      '_wait_for_condition: %(method_name)s '
                      'execution failed for %(exception)s',
                      {'method_name': testmethod.__name__,
                       'exception': ex})
        if test_value:
            raise loopingcall.LoopingCallDone()

        # Compare floats directly; the original int() truncation could
        # extend the effective timeout by up to a second.
        if time.time() - start_time > timeout:
            msg = (_('CommandLineHelper._wait_for_condition: %s timeout')
                   % testmethod.__name__)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    timer = loopingcall.FixedIntervalLoopingCall(_inner)
    timer.start(interval=interval).wait()
+
def get_vdisk_params(self, config, state, type_id, volume_type=None,
volume_metadata=None):
"""Return the parameters for creating the vdisk.
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def start_fc_consistgrp(self, fc_consistgrp):
    """Start (trigger) the FlashCopy consistency group on the array."""
    self.ssh.startfcconsistgrp(fc_consistgrp)

def create_fc_consistgrp(self, fc_consistgrp):
    """Create a FlashCopy consistency group on the array."""
    self.ssh.mkfcconsistgrp(fc_consistgrp)

def delete_fc_consistgrp(self, fc_consistgrp):
    """Remove a FlashCopy consistency group from the array."""
    self.ssh.rmfcconsistgrp(fc_consistgrp)

def stop_fc_consistgrp(self, fc_consistgrp):
    """Stop the FlashCopy consistency group on the array."""
    self.ssh.stopfcconsistgrp(fc_consistgrp)
+
def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state,
                             config, timeout):
    """Snapshot every CG volume atomically via one FC consistency group.

    Creates one FlashCopy mapping per snapshot inside ``fc_consistgrp``,
    prepares and starts the group, then deletes the (already started)
    group.  Returns (cgsnapshot_model, snapshots) with per-item status
    set to 'available' on success or 'error' on any failure.
    """
    cgsnapshot = {'status': 'available'}
    try:
        for snapshot in snapshots:
            opts = self.get_vdisk_params(config, state,
                                         snapshot['volume_type_id'])
            self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
                                                snapshot['name'],
                                                fc_consistgrp,
                                                config, opts)
            snapshot['status'] = 'available'

        self.prepare_fc_consistgrp(fc_consistgrp, timeout)
        self.start_fc_consistgrp(fc_consistgrp)
        # There is CG limitation that could not create more than 128 CGs.
        # After start CG, we delete CG to avoid CG limitation.
        # Cinder general will maintain the CG and snapshots relationship.
        self.delete_fc_consistgrp(fc_consistgrp)
    except exception.VolumeBackendAPIException as err:
        # Any failure marks the whole cgsnapshot and every member
        # snapshot as 'error'.
        for snapshot in snapshots:
            snapshot['status'] = 'error'
        cgsnapshot['status'] = 'error'
        # Release cg
        self.delete_fc_consistgrp(fc_consistgrp)
        LOG.error(_LE("Failed to create CGSnapshot. "
                      "Exception: %s"), err)

    return cgsnapshot, snapshots
+
def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots):
    """Delete flashcopy maps and consistent group."""
    # NOTE(review): fc_consistgrp is unused here -- the group is
    # deleted right after being started in run_consistgrp_snapshots,
    # so only the snapshot vdisks remain to be removed.
    cgsnapshot = {'status': 'available'}
    try:
        for snapshot in snapshots:
            self.ssh.rmvdisk(snapshot['name'], True)
            snapshot['status'] = 'deleted'
    except exception.VolumeBackendAPIException as err:
        # One failure marks the cgsnapshot and *every* snapshot
        # (including ones already removed above) as error_deleting.
        for snapshot in snapshots:
            snapshot['status'] = 'error_deleting'
        cgsnapshot['status'] = 'error_deleting'
        LOG.error(_LE("Failed to delete the snapshot %(snap)s of "
                      "CGSnapshot. Exception: %(exception)s"),
                  {'snap': snapshot['name'], 'exception': err})
    return cgsnapshot, snapshots
+
def prepare_fc_consistgrp(self, fc_consistgrp, timeout):
    """Prepare FC Consistency Group.

    Issues prestartfcconsistgrp, then polls until the group reports
    'prepared' (re-issuing on 'stopped'), raising on any unexpected
    state or when ``timeout`` elapses.
    """
    self.ssh.prestartfcconsistgrp(fc_consistgrp)

    def prepare_fc_consistgrp_success():
        mapping_ready = False
        mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp)
        if (mapping_attrs is None or
                'status' not in mapping_attrs):
            # Attributes not available yet: keep polling.  The original
            # fell through ('pass') and then crashed subscripting None.
            return mapping_ready
        if mapping_attrs['status'] == 'prepared':
            mapping_ready = True
        elif mapping_attrs['status'] == 'stopped':
            self.ssh.prestartfcconsistgrp(fc_consistgrp)
        elif mapping_attrs['status'] != 'preparing':
            # Only 'prepared', 'stopped' and 'preparing' are expected
            # while waiting; anything else is a hard failure.
            msg = (_('Unexpected mapping status %(status)s for mapping'
                     '%(id)s. Attributes: %(attr)s') %
                   {'status': mapping_attrs['status'],
                    'id': fc_consistgrp,
                    'attr': mapping_attrs})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return mapping_ready

    self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout)
+
def run_flashcopy(self, source, target, timeout, full_copy=True):
    """Create a FlashCopy mapping from the source to the target."""
    # NOTE(review): 'timeout' is not used in this body; presumably kept
    # for interface compatibility with callers -- confirm.
    LOG.debug('enter: run_flashcopy: execute FlashCopy from source '
              '%(source)s to target %(target)s',
              {'source': source, 'target': target})
    fc_map_id = self.ssh.mkfcmap(source, target, full_copy)
    self.ssh.startfcmap(fc_map_id)
    LOG.debug('leave: run_flashcopy: FlashCopy started from '
              '%(source)s to %(target)s',
              {'source': source, 'target': target})
+
def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
                                   config, opts, full_copy=False,
                                   pool=None):
    """Create a FlashCopy mapping and add it to a consistency group.

    Creates the target vdisk (same capacity as the source) and then a
    FlashCopy mapping from ``source`` to ``target`` that is a member of
    ``consistgrp``.  The mapping is not started here; the group is
    started as a whole later.
    """
    # Original message concatenated '%(target)s' and 'Then' with no
    # separator, producing a fused log line.
    LOG.debug('enter: create_flashcopy_to_consistgrp: create FlashCopy '
              'from source %(source)s to target %(target)s. '
              'Then add the flashcopy to %(cg)s',
              {'source': source, 'target': target, 'cg': consistgrp})

    src_attrs = self.get_vdisk_attributes(source)
    if src_attrs is None:
        msg = (_('create_copy: Source vdisk %(src)s '
                 'does not exist') % {'src': source})
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    src_size = src_attrs['capacity']
    # In case we need to use a specific pool
    if not pool:
        pool = config.storwize_svc_volpool_name
    self.create_vdisk(target, src_size, 'b', pool, opts)

    self.ssh.mkfcmap(source, target, full_copy, consistgrp)

    LOG.debug('leave: create_flashcopy_to_consistgrp: '
              'FlashCopy started from %(source)s to %(target)s',
              {'source': source, 'target': target})
def _get_vdisk_fc_mappings(self, vdisk):
return None
return resp[0]
+ def _get_flashcopy_consistgrp_attr(self, fc_map_id):
+ resp = self.ssh.lsfcconsistgrp(fc_map_id)
+ if not len(resp):
+ return None
+ return resp[0]
+
def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
"""FlashCopy mapping check helper."""
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s' % name)
'-unit', 'gb', vdisk])
self.run_ssh_assert_no_output(ssh_cmd)
- def mkfcmap(self, source, target, full_copy):
+ def mkfcmap(self, source, target, full_copy, consistgrp=None):
ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target',
target, '-autodelete']
if not full_copy:
ssh_cmd.extend(['-copyrate', '0'])
+ if consistgrp:
+ ssh_cmd.extend(['-consistgrp', consistgrp])
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if 'successfully created' not in out:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
ssh_cmd = ['svctask', 'startfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def prestartfcconsistgrp(self, fc_consist_group):
    """Run ``svctask prestartfcconsistgrp`` (command emits no output)."""
    ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
    self.run_ssh_assert_no_output(ssh_cmd)

def startfcconsistgrp(self, fc_consist_group):
    """Run ``svctask startfcconsistgrp`` (command emits no output)."""
    ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
    self.run_ssh_assert_no_output(ssh_cmd)

def stopfcconsistgrp(self, fc_consist_group):
    """Run ``svctask stopfcconsistgrp`` (command emits no output)."""
    ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
    self.run_ssh_assert_no_output(ssh_cmd)
+
def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
'-autodelete', autodel, fc_map_id]
'id=%s' % fc_map_id, '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcconsistgrp(self, fc_consistgrp):
    """Return ``lsfcconsistgrp`` output for one group as a CLIResponse.

    Parsed '!'-delimited without a header row (the detailed
    single-object view).
    """
    ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
    out, err = self._ssh(ssh_cmd)
    return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
                       with_header=False)

def mkfcconsistgrp(self, fc_consist_group):
    """Create a named FlashCopy consistency group on the array."""
    ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
    return self.run_ssh_check_created(ssh_cmd)

def rmfcconsistgrp(self, fc_consist_group):
    """Force-remove a FlashCopy consistency group from the array."""
    ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
    return self.run_ssh_assert_no_output(ssh_cmd)
+
def addvdiskcopy(self, vdisk, dest_pool, params):
ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
dest_pool, vdisk])