Adds replication support to the IBM Storwize/SVC driver.
Supports IBM SVC stretched cluster mode.
DocImpact
Change-Id: Id82623518c3508ad637f77d08699ffebcb44921e
'vdisk',
'warning',
'wwpn',
+ 'primary'
]
no_or_one_param_args = [
'autoexpand',
vol = self._volumes_list[vol_name]
kwargs.pop('obj')
- params = ['name', 'warning', 'udid', 'autoexpand', 'easytier']
+ params = ['name', 'warning', 'udid',
+ 'autoexpand', 'easytier', 'primary']
for key, value in kwargs.iteritems():
if key == 'easytier':
vol['easy_tier'] = value
vol['name'] = value
del self._volumes_list[vol_name]
self._volumes_list[value] = vol
+ if key == 'primary':
+ if value == '0':
+ self._volumes_list[vol_name]['copies']['0']['primary']\
+ = 'yes'
+ self._volumes_list[vol_name]['copies']['1']['primary']\
+ = 'no'
+ elif value == '1':
+ self._volumes_list[vol_name]['copies']['0']['primary']\
+ = 'no'
+ self._volumes_list[vol_name]['copies']['1']['primary']\
+ = 'yes'
+ else:
+ err = self._errors['CMMVC6353E'][1] % {'VALUE': key}
+ return ('', err)
if key in params:
vol[key] = value
else:
return self._errors['CMMVC5701E']
return ('', '')
+ # list vdisk sync process
+ def _cmd_lsvdisksyncprogress(self, **kwargs):
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC5804E']
+ name = kwargs['obj']
+ copy_id = kwargs.get('copy', None)
+ vol = self._volumes_list[name]
+ rows = []
+ rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress',
+ 'estimated_completion_time'])
+ copy_found = False
+ for copy in vol['copies'].itervalues():
+ if not copy_id or copy_id == copy['id']:
+ copy_found = True
+ row = [vol['id'], name, copy['id']]
+ if copy['sync'] == 'yes':
+ row.extend(['100', ''])
+ else:
+ row.extend(['50', '140210115226'])
+ copy['sync'] = 'yes'
+ rows.append(row)
+ if not copy_found:
+ return self._errors['CMMVC5804E']
+ return self._print_info_cmd(rows=rows, **kwargs)
+
def _add_host_to_list(self, connector):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
self.sim = StorwizeSVCManagementSimulator('openstack')
self.driver.set_fake_storage(self.sim)
+ self.ctxt = context.get_admin_context()
else:
self.driver = storwize_svc.StorwizeSVCDriver(
configuration=conf.Configuration(None))
self.assertEqual(term_data, term_ret)
+    def test_storwize_create_volume_with_strech_cluster_replication(self):
+        # Set replication flag, set pool openstack2 for secondary volume.
+        self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
+
+        # Create a type for replication.
+        volume = self._generate_vol_info(None, None)
+        volume_type = self._create_replication_volume_type(True)
+        volume['volume_type_id'] = volume_type['id']
+
+        self.driver.do_setup(self.ctxt)
+
+        model_update = self.driver.create_volume(volume)
+        self.assertEqual('copying', model_update['replication_status'])
+
+        volume['replication_status'] = 'copying'
+        volume['replication_extended_status'] = None
+
+        model_update = self.driver.get_replication_status(self.ctxt, volume)
+        self.assertEqual('copying', model_update['replication_status'])
+
+        # Check the volume copy created on pool openstack2.
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertIn('openstack2', attrs['mdisk_grp_name'])
+
+        primary_status = attrs['primary']
+
+        self.driver.promote_replica(self.ctxt, volume)
+        # After promote_replica, the primary copy should be switched.
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertEqual(primary_status[0], attrs['primary'][1])
+        self.assertEqual(primary_status[1], attrs['primary'][0])
+
+        self.driver.delete_volume(volume)
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertIsNone(attrs)
+
+    def test_storwize_create_cloned_volume_with_strech_cluster_replica(self):
+        # Set replication flag, set pool openstack2 for secondary volume.
+        self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
+        self.driver.do_setup(self.ctxt)
+
+        # Create a source volume.
+        src_volume = self._generate_vol_info(None, None)
+        self.driver.create_volume(src_volume)
+
+        # Create a type for replication.
+        volume = self._generate_vol_info(None, None)
+        volume_type = self._create_replication_volume_type(True)
+        volume['volume_type_id'] = volume_type['id']
+
+        # Create a cloned volume from source volume.
+        model_update = self.driver.create_cloned_volume(volume, src_volume)
+        self.assertEqual('copying', model_update['replication_status'])
+
+        # Check the replication volume created on pool openstack2.
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertIn('openstack2', attrs['mdisk_grp_name'])
+
+ def test_storwize_create_snapshot_volume_with_strech_cluster_replica(self):
+ # Set replication flag, set pool openstack2 for secondary volume.
+ self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
+ self.driver.do_setup(self.ctxt)
+
+ vol1 = self._create_volume()
+ snap = self._generate_vol_info(vol1['name'], vol1['id'])
+ self.driver.create_snapshot(snap)
+ vol2 = self._generate_vol_info(None, None)
+
+ # Create a type for repliation.
+ vol2 = self._generate_vol_info(None, None)
+ volume_type = self._create_replication_volume_type(True)
+ vol2['volume_type_id'] = volume_type['id']
+
+ model_update = self.driver.create_volume_from_snapshot(vol2, snap)
+ self._assert_vol_exists(vol2['name'], True)
+ self.assertEqual('copying', model_update['replication_status'])
+ # Check the replication volume created on pool openstack2.
+ attrs = self.driver._helpers.get_vdisk_attributes(vol2['name'])
+ self.assertIn('openstack2', attrs['mdisk_grp_name'])
+
+ def test_storwize_retype_with_strech_cluster_replication(self):
+ self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
+ self.driver.do_setup(self.ctxt)
+ self.driver.do_setup(None)
+ loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+ ':openstack')
+ cap = {'location_info': loc, 'extent_size': '128'}
+ self.driver._stats = {'location_info': loc}
+ host = {'host': 'foo', 'capabilities': cap}
+ ctxt = context.get_admin_context()
+
+ disable_type = self._create_replication_volume_type(False)
+ enable_type = self._create_replication_volume_type(True)
+
+ diff, equal = volume_types.volume_types_diff(ctxt,
+ disable_type['id'],
+ enable_type['id'])
+
+ volume = self._generate_vol_info(None, None)
+ volume['host'] = host
+ volume['volume_type_id'] = disable_type['id']
+ volume['volume_type'] = disable_type
+ volume['replication_status'] = None
+ volume['replication_extended_status'] = None
+
+ # Create volume which is not volume replication
+ self.driver.create_volume(volume)
+ # volume should be DB object in this parameter
+ model_update = self.driver.get_replication_status(self.ctxt, volume)
+ self.assertIs('error', model_update['replication_status'])
+ # Enable replica
+ self.driver.retype(ctxt, volume, enable_type, diff, host)
+
+ model_update = self.driver.get_replication_status(self.ctxt, volume)
+ self.assertIs('copying', model_update['replication_status'])
+ self.driver.delete_volume(volume)
+
def test_storwize_initiator_target_map_npiv(self):
# Create two volumes to be used in mappings
ctxt = context.get_admin_context()
self.assertEqual(term_data, term_ret)
+ def _create_replication_volume_type(self, enable):
+ # Generate a volume type for volume repliation.
+ if enable:
+ spec = {'capabilities:replication': '<is> True'}
+ type_ref = volume_types.create(self.ctxt, "replication_1", spec)
+ else:
+ spec = {'capabilities:replication': '<is> False'}
+ type_ref = volume_types.create(self.ctxt, "replication_2", spec)
+
+ replication_type = volume_types.get_volume_type(self.ctxt,
+ type_ref['id'])
+
+ return replication_type
+
def _get_vdisk_uid(self, vdisk_name):
"""Return vdisk_UID for given vdisk.
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
+from cinder.volume.drivers.ibm.storwize_svc import replication as storwize_rep
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
cfg.BoolOpt('storwize_svc_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
+ cfg.StrOpt('storwize_svc_stretched_cluster_partner',
+ default=None,
+ help='If operating in stretched cluster mode, specify the '
+ 'name of the pool in which mirrored copies are stored.'
+ 'Example: "pool2"'),
]
CONF = cfg.CONF
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
+ 1.3.1 - Added support for volume replication
"""
- VERSION = "1.2.6"
+ VERSION = "1.3.1"
VDISKCOPYOPS_INTERVAL = 600
def __init__(self, *args, **kwargs):
self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
+ self.replication = None
self._state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
+ # Get the replication helpers
+ self.replication = storwize_rep.StorwizeSVCReplication.factory(self)
+
# Validate that the pool exists
pool = self.configuration.storwize_svc_volpool_name
try:
volume_metadata=
volume.get('volume_metadata'))
pool = self.configuration.storwize_svc_volpool_name
- data = self._helpers.create_vdisk(volume['name'], str(volume['size']),
- 'gb', pool, opts)
+ self._helpers.create_vdisk(volume['name'], str(volume['size']),
+ 'gb', pool, opts)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
- return data
+
+ model_update = None
+ if 'replication' in opts and opts['replication']:
+ ctxt = context.get_admin_context()
+ model_update = self.replication.create_replica(ctxt, volume)
+ return model_update
def delete_volume(self, volume):
self._helpers.delete_vdisk(volume['name'], False)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
+ if 'replication' in opts and opts['replication']:
+ ctxt = context.get_admin_context()
+ replica_status = self.replication.create_replica(ctxt, volume)
+ if replica_status:
+ return replica_status
+
def create_cloned_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
if opts['qos']:
self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
+ if 'replication' in opts and opts['replication']:
+ ctxt = context.get_admin_context()
+ replica_status = self.replication.create_replica(ctxt, tgt_volume)
+ if replica_status:
+ return replica_status
+
def extend_volume(self, volume, new_size):
LOG.debug('enter: extend_volume: volume %s' % volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
self._helpers.extend_vdisk(volume['name'], extend_amt)
LOG.debug('leave: extend_volume: volume %s' % volume['id'])
+    def add_vdisk_copy(self, volume, dest_pool, vol_type):
+        """Add a mirrored copy of a vdisk in the given destination pool.
+
+        ``volume`` is the vdisk name (callers pass volume['name']); the
+        copy options are derived from ``vol_type``.
+        """
+        return self._helpers.add_vdisk_copy(volume, dest_pool,
+                                            vol_type, self._state,
+                                            self.configuration)
+
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
'vdiskcopyops')
+ def promote_replica(self, ctxt, volume):
+ return self.replication.promote_replica(volume)
+
+ def reenable_replication(self, ctxt, volume):
+ return self.replication.reenable_replication(volume)
+
+ def create_replica_test_volume(self, tgt_volume, src_volume):
+ if src_volume['size'] != tgt_volume['size']:
+ msg = (_('create_cloned_volume: Source and destination '
+ 'size differ.'))
+ LOG.error(msg)
+ raise exception.InvalidInput(message=msg)
+ replica_status = self.replication.test_replica(tgt_volume,
+ src_volume)
+ return replica_status
+
+ def get_replication_status(self, ctxt, volume):
+ return self.replication.get_replication_status(volume)
+
def _check_volume_copy_ops(self):
LOG.debug("enter: update volume copy status")
ctxt = context.get_admin_context()
vol_type = None
self._check_volume_copy_ops()
- new_op = self._helpers.add_vdisk_copy(volume['name'], dest_pool,
- vol_type, self._state,
- self.configuration)
+ new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
+ # Check if retype affects volume replication
+ model_update = None
+ old_type_replication = old_opts.get('replication', False)
+ new_type_replication = new_opts.get('replication', False)
+
+ # Delete replica if needed
+ if old_type_replication and not new_type_replication:
+ self.replication.delete_replica(volume)
+ model_update = {'replication_status': 'disabled',
+ 'replication_driver_data': None,
+ 'replication_extended_status': None}
+
vdisk_changes = []
need_copy = False
for key in all_keys:
if dest_pool is None:
return False
- retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
+ # If volume is replicated, can't copy
+ if new_type_replication:
+ msg = (_('Unable to retype: Volume %s is replicated.'),
+ volume['id'])
+ raise exception.VolumeDriverException(message=msg)
+
+ retype_iogrp_property(volume,
+ new_opts['iogrp'],
+ old_opts['iogrp'])
try:
- new = self._helpers.add_vdisk_copy(volume['name'], dest_pool,
- new_type, self._state,
- self.configuration)
- self._add_vdisk_copy_op(ctxt, volume, new)
+ new_op = self.add_vdisk_copy(volume['name'],
+ dest_pool,
+ new_type)
+ self._add_vdisk_copy_op(ctxt, volume, new_op)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_opts['iogrp'],
qos = new_opts['qos'] or old_opts['qos']
if qos:
self._helpers.add_vdisk_qos(volume['name'], qos)
+
+ # Add replica if needed
+ if not old_type_replication and new_type_replication:
+ model_update = self.replication.create_replica(ctxt, volume)
+
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
- return True
+ return True, model_update
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
{'sys_id': self._state['system_id'],
'pool': pool})
+ if self.replication:
+ data.update(self.replication.get_replication_info())
+
self._stats = data
elif protocol.lower() == 'iscsi':
protocol = 'iSCSI'
+ cluster_partner = config.storwize_svc_stretched_cluster_partner
opt = {'rsize': config.storwize_svc_vol_rsize,
'warning': config.storwize_svc_vol_warning,
'autoexpand': config.storwize_svc_vol_autoexpand,
'protocol': protocol,
'multipath': config.storwize_svc_multipath_enabled,
'iogrp': config.storwize_svc_vol_iogrp,
- 'qos': None}
+ 'qos': None,
+ 'stretched_cluster': cluster_partner,
+ 'replication': False}
return opt
@staticmethod
del words[0]
value = words[0]
+ # We generally do not look at capabilities in the driver, but
+ # replication is a special case where the user asks for
+ # a volume to be replicated, and we want both the scheduler and
+ # the driver to act on the value.
+ if ((not scope or scope == 'capabilities') and
+ key == 'replication'):
+ scope = None
+ key = 'replication'
+ words = value.split()
+ if not (words and len(words) == 2 and words[0] == '<is>'):
+ LOG.error(_('Replication must be specified as '
+ '\'<is> True\' or \'<is> False\'.'))
+ del words[0]
+ value = words[0]
+
# Add the QoS.
if scope and scope == 'qos':
type_fn = self.svc_qos_keys[key]
if volume_type:
qos_specs_id = volume_type.get('qos_specs_id')
specs = dict(volume_type).get('extra_specs')
+
# NOTE(vhou): We prefer the qos_specs association
# and over-ride any existing
# extra-specs settings if present
qos = self._get_qos_from_volume_metadata(volume_metadata)
if len(qos) != 0:
opts['qos'] = qos
+
self.check_vdisk_opts(state, opts)
return opts
attrs = self.get_vdisk_attributes(vdisk_name)
return attrs is not None
+    def find_vdisk_copy_id(self, vdisk, pool):
+        """Return the id of the vdisk copy residing in the given pool.
+
+        Raises VolumeDriverException when the vdisk has no copy in that
+        pool.
+        """
+        resp = self.ssh.lsvdiskcopy(vdisk)
+        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
+            if mdisk_grp == pool:
+                return copy_id
+        msg = _('Failed to find a vdisk copy in the expected pool.')
+        LOG.error(msg)
+        raise exception.VolumeDriverException(message=msg)
+
+    def get_vdisk_copy_attrs(self, vdisk, copy_id):
+        """Return the attributes of a single copy of a vdisk."""
+        return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
+
+    def get_vdisk_copies(self, vdisk):
+        """Return the primary and secondary copies of a mirrored vdisk.
+
+        The result maps 'primary' and 'secondary' to dicts describing each
+        copy (id, status, sync state, pool, sync progress), or to None
+        when the corresponding copy does not exist.
+        """
+        copies = {'primary': None,
+                  'secondary': None}
+
+        resp = self.ssh.lsvdiskcopy(vdisk)
+        for copy_id, status, sync, primary, mdisk_grp in \
+            resp.select('copy_id', 'status', 'sync',
+                        'primary', 'mdisk_grp_name'):
+            copy = {'copy_id': copy_id,
+                    'status': status,
+                    'sync': sync,
+                    'primary': primary,
+                    'mdisk_grp_name': mdisk_grp,
+                    'sync_progress': None}
+            # Only query sync progress for copies that are still syncing.
+            if copy['sync'] != 'yes':
+                progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
+                copy['sync_progress'] = progress_info['progress']
+            if copy['primary'] == 'yes':
+                copies['primary'] = copy
+            else:
+                copies['secondary'] = copy
+        return copies
+
+    def check_copy_ok(self, vdisk, pool, copy_type):
+        """Check the health of one copy (primary/secondary) of a vdisk.
+
+        Returns a (status, extended_status) tuple: ('error', reason) when
+        the copy is missing or offline; ('active', None) for an in-sync
+        secondary; ('copying', progress) for a syncing secondary; and
+        (None, None) for a healthy primary.
+        """
+        try:
+            copy_id = self.find_vdisk_copy_id(vdisk, pool)
+            attrs = self.get_vdisk_copy_attrs(vdisk, copy_id)
+        except (exception.VolumeBackendAPIException,
+                exception.VolumeDriverException):
+            extended = ('No %(type)s copy in pool %(pool)s' %
+                        {'type': copy_type, 'pool': pool})
+            return ('error', extended)
+        if attrs['status'] != 'online':
+            extended = 'The %s copy is offline' % copy_type
+            return ('error', extended)
+        if copy_type == 'secondary':
+            if attrs['sync'] == 'yes':
+                return ('active', None)
+            else:
+                progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
+                extended = 'progress: %s%%' % progress_info['progress']
+                return ('copying', extended)
+        return (None, None)
+
def _prepare_fc_map(self, fc_map_id, timeout):
self.ssh.prestartfcmap(fc_map_id)
mapping_ready = False
self.ssh.rmvdisk(vdisk, force=force)
LOG.debug('leave: delete_vdisk: vdisk %s' % vdisk)
- def create_copy(self, src, tgt, src_id, config, opts, full_copy):
+ def create_copy(self, src, tgt, src_id, config, opts,
+ full_copy, pool=None):
"""Create a new snapshot using FlashCopy."""
LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' %
{'tgt': tgt, 'src': src})
raise exception.VolumeDriverException(message=msg)
src_size = src_attrs['capacity']
- pool = config.storwize_svc_volpool_name
+ # In case we need to use a specific pool
+ if not pool:
+ pool = config.storwize_svc_volpool_name
self.create_vdisk(tgt, src_size, 'b', pool, opts)
timeout = config.storwize_svc_flashcopy_timeout
try:
def rename_vdisk(self, vdisk, new_name):
self.ssh.chvdisk(vdisk, ['-name', new_name])
+
+    def change_vdisk_primary_copy(self, vdisk, copy_id):
+        """Make the given copy the primary copy of a mirrored vdisk."""
+        self.ssh.chvdisk(vdisk, ['-primary', copy_id])
--- /dev/null
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+
+class StorwizeSVCReplication(object):
+    """Base class for Storwize/SVC replication modes.
+
+    Provides no-op defaults for every replication operation; concrete
+    modes (e.g. stretched cluster) override the relevant methods.
+    """
+
+    def __init__(self, driver):
+        self.driver = driver
+
+    @staticmethod
+    def factory(driver):
+        """Use replication methods for the requested mode.
+
+        Returns None when no stretched cluster partner is configured.
+        """
+        stretch = driver.configuration.storwize_svc_stretched_cluster_partner
+        if stretch:
+            return StorwizeSVCReplicationStretchedCluster(driver)
+
+    def create_replica(self, ctxt, volume):
+        return (None, None)
+
+    def is_replicated(self, volume):
+        return False
+
+    def promote_replica(self, volume):
+        pass
+
+    def test_replica(self, tgt_volume, src_volume):
+        pass
+
+    def get_replication_status(self, volume):
+        return None
+
+    def get_replication_info(self):
+        return {}
+
+    def reenable_replication(self, volume):
+        """Enable the replication between the primary and secondary volumes.
+
+        This is not implemented in the StorwizeSVCReplicationStretchedCluster,
+        as the Storwize backend is responsible for automatically resuming
+        mirroring when stopped.
+        """
+        pass
+
+
+class StorwizeSVCReplicationStretchedCluster(StorwizeSVCReplication):
+ """Support for Storwize/SVC stretched cluster mode replication."""
+
+ def __init__(self, driver):
+ super(StorwizeSVCReplicationStretchedCluster, self).__init__(driver)
+
+ def create_replica(self, ctxt, volume):
+ conf = self.driver.configuration
+ vol_type = volume['volume_type_id']
+ vol_type = volume_types.get_volume_type(ctxt, vol_type)
+ dest_pool = conf.storwize_svc_stretched_cluster_partner
+
+ self.driver.add_vdisk_copy(volume['name'], dest_pool, vol_type)
+ vol_update = {'replication_status': 'copying'}
+ return vol_update
+
+ def delete_replica(self, volume):
+ vdisk = volume['name']
+ copies = self.driver._helpers.get_vdisk_copies(vdisk)
+ secondary = copies['secondary']
+
+ if secondary:
+ self.driver._helpers.rm_vdisk_copy(volume['name'],
+ secondary['copy_id'])
+ else:
+ LOG.info(('Could not find replica to delete of'
+ ' volume %(vol)s.') % {'vol': vdisk})
+
+ def test_replica(self, tgt_volume, src_volume):
+ vdisk = src_volume['name']
+ opts = self.driver._get_vdisk_params(tgt_volume['volume_type_id'])
+ copies = self.driver._helpers.get_vdisk_copies(vdisk)
+
+ if copies['secondary']:
+ dest_pool = copies['secondary']['mdisk_grp_name']
+ self.driver._helpers.create_copy(src_volume['name'],
+ tgt_volume['name'],
+ src_volume['id'],
+ self.driver.configuration,
+ opts,
+ True,
+ pool=dest_pool)
+ else:
+ msg = (_('Unable to create replica clone for volume %s'), vdisk)
+ raise exception.VolumeDriverException(message=msg)
+
+ def promote_replica(self, volume):
+ vdisk = volume['name']
+ copies = self.driver._helpers.get_vdisk_copies(vdisk)
+ if copies['secondary']:
+ copy_id = copies['secondary']['copy_id']
+ self.driver._helpers.change_vdisk_primary_copy(volume['name'],
+ copy_id)
+ else:
+ msg = (_('Unable to promote replica to primary for volume %s.'
+ ' No secondary copy available.'),
+ volume['id'])
+ raise exception.VolumeDriverException(message=msg)
+
+ def get_replication_status(self, volume):
+ # Make sure volume is replicated, otherwise ignore
+ if volume['replication_status'] == 'disabled':
+ return None
+
+ vdisk = volume['name']
+ orig = (volume['replication_status'],
+ volume['replication_extended_status'])
+ copies = self.driver._helpers.get_vdisk_copies(vdisk)
+
+ primary = copies.get('primary', None)
+ secondary = copies.get('secondary', None)
+ status = None
+
+ # Check status of primary copy
+ if not primary:
+ primary = {'status': 'not found',
+ 'sync': 'no'}
+ if primary['status'] != 'online':
+ status = 'error'
+ else:
+ status = 'active'
+
+ extended1 = (_('Primary copy status: %(status)s'
+ ' and synchronized: %(sync)s') %
+ {'status': primary['status'],
+ 'sync': primary['sync']})
+ # Check status of secondary copy
+ if not secondary:
+ secondary = {'status': 'not found',
+ 'sync': 'no',
+ 'sync_progress': '0'}
+
+ if secondary['status'] != 'online':
+ status = 'error'
+ else:
+ if secondary['sync'] == 'yes':
+ status = 'active'
+ secondary['sync_progress'] = '100'
+ else:
+ status = 'copying'
+
+ extended2 = (_('Secondary copy status: %(status)s'
+ ' and synchronized: %(sync)s,'
+ ' sync progress is: %(progress)s%%') %
+ {'status': secondary['status'],
+ 'sync': secondary['sync'],
+ 'progress': secondary['sync_progress']})
+
+ extended = '%s. %s' % (extended1, extended2)
+
+ if (status, extended) != orig:
+ return {'replication_status': status,
+ 'replication_extended_status': extended}
+ else:
+ return None
+
+ def get_replication_info(self):
+ data = {}
+ data['replication'] = True
+ return data
ssh_cmd += [vdisk]
return self.run_ssh_info(ssh_cmd, with_header=with_header)
+    def lsvdisksyncprogress(self, vdisk, copy_id):
+        """Return the sync progress info for one copy of a vdisk."""
+        ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
+                   '-copy', copy_id, vdisk]
+        return self.run_ssh_info(ssh_cmd, with_header=True)[0]
+
def rmvdiskcopy(self, vdisk, copy_id):
ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
# Allow tenants to specify QOS on create (boolean value)
#storwize_svc_allow_tenant_qos=false
+# If operating in stretched cluster mode, specify the name of
+# the pool in which mirrored copies are stored. Example:
+# "pool2" (string value)
+#storwize_svc_stretched_cluster_partner=<None>
+
#
# Options defined in cinder.volume.drivers.ibm.xiv_ds8k