review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Storwize: Update replication to v2.1
authorVincent Hou <shou@us.ibm.com>
Tue, 1 Mar 2016 19:26:52 +0000 (14:26 -0500)
committerVincent Hou <shou@us.ibm.com>
Wed, 9 Mar 2016 23:13:44 +0000 (18:13 -0500)
This patch updates replication to match the v2.1 spec. This makes
it possible to replicate an entire backend, and upon failover, all
replicated volumes will be failed over together.

cinder.conf should contain the replication config group.

The replication can be configured via either multi-backend on one
cinder volume node, or on separate cinder volume nodes.

Options to be put in cinder.conf, where the primary back-end is
located:

enabled_backends = sv1, sv2 (if enabling multi-backends)

[sv1]
san_login = admin
san_password = admin
san_ip = 192.168.0.11
volume_driver = cinder.volume.drivers.ibm.storwize_svc.\
                storwize_svc_iscsi.StorwizeSVCISCSIDriver
volume_backend_name = sv1
storwize_svc_volpool_name=cinder
replication_device = managed_backend_name:second_host@sv2#sv2,
                     backend_id:svc_backend_id,
                     replication_mode:global,
                     san_ip:192.168.0.12,san_login:admin,
                     san_password:admin,pool_name:cinder_target

Options to be put in cinder.conf, where the secondary back-end is
located:

[sv2]
san_login = admin
san_password = admin
san_ip = 192.168.0.12
volume_driver = cinder.volume.drivers.ibm.storwize_svc.\
                storwize_svc_iscsi.StorwizeSVCISCSIDriver
volume_backend_name = sv2
storwize_svc_volpool_name=cinder_target

DocImpact
Closes-Bug: #1544611

Change-Id: I8a4963fa4b30f2df1903697909deece762228257

cinder/tests/unit/test_storwize_svc.py
cinder/volume/drivers/ibm/storwize_svc/replication.py
cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py
cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py
releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml [new file with mode: 0644]
releasenotes/notes/storwize-v2-replication-mirror-managed-50c1b2996790760e.yaml [deleted file]

index 281298131de2c4ca4318607f1417508805fd0918..4528a71101bb778096e90f7f871d7cb457b8bae9 100644 (file)
@@ -4769,7 +4769,7 @@ class StorwizeSVCReplicationMirrorTestCase(test.TestCase):
         extra_spec_rep_type = '<in> ' + self.rep_type
         fake_target = {"managed_backend_name": "second_host@sv2#sv2",
                        "replication_mode": self.rep_type,
-                       "target_device_id": "svc_id_target",
+                       "backend_id": "svc_id_target",
                        "san_ip": "192.168.10.23",
                        "san_login": "admin",
                        "san_password": "admin",
@@ -4784,10 +4784,10 @@ class StorwizeSVCReplicationMirrorTestCase(test.TestCase):
         self.svc_driver.replications[self.rep_type] = (
             self.svc_driver.replication_factory(self.rep_type, fake_target))
         self.ctxt = context.get_admin_context()
-        rand_id = six.text_type(uuid.uuid4())
+        self.fake_volume_id = six.text_type(uuid.uuid4())
         pool = _get_test_pool()
-        self.volume = {'name': 'volume-%s' % rand_id,
-                       'size': 10, 'id': '%s' % rand_id,
+        self.volume = {'name': 'volume-%s' % self.fake_volume_id,
+                       'size': 10, 'id': '%s' % self.fake_volume_id,
                        'volume_type_id': None,
                        'mdisk_grp_name': 'openstack',
                        'replication_status': 'disabled',
@@ -4801,6 +4801,7 @@ class StorwizeSVCReplicationMirrorTestCase(test.TestCase):
                                                              type_ref['id'])
         self.volume['volume_type_id'] = self.replication_type['id']
         self.volume['volume_type'] = self.replication_type
+        self.volumes = [self.volume]
 
     def test_storwize_do_replication_setup(self):
         self.svc_driver.configuration.set_override('san_ip', "192.168.10.23")
@@ -4810,7 +4811,7 @@ class StorwizeSVCReplicationMirrorTestCase(test.TestCase):
 
     def test_storwize_do_replication_setup_unmanaged(self):
         fake_target = {"replication_mode": self.rep_type,
-                       "target_device_id": "svc_id_target",
+                       "backend_id": "svc_id_target",
                        "san_ip": "192.168.10.23",
                        "san_login": "admin",
                        "san_password": "admin",
@@ -4880,48 +4881,21 @@ class StorwizeSVCReplicationMirrorTestCase(test.TestCase):
         rep_setup.assert_called_once_with(self.ctxt, target_volume)
         self.assertEqual({'replication_status': 'enabled'}, model_update)
 
-    @mock.patch.object(mirror_class, 'replication_enable')
-    @mock.patch.object(mirror_class, 'volume_replication_setup')
-    def test_storwize_replication_enable(self, rep_setup,
-                                         replication_enable):
-        self.svc_driver.replication_enable(self.ctxt, self.volume)
-        replication_enable.assert_called_once_with(self.ctxt, self.volume)
-
-    @mock.patch.object(mirror_class,
-                       'replication_disable')
-    @mock.patch.object(mirror_class,
-                       'volume_replication_setup')
-    def test_storwize_replication_disable(self, rep_setup,
-                                          replication_disable):
-        self.svc_driver.replication_disable(self.ctxt, self.volume)
-        replication_disable.assert_called_once_with(self.ctxt, self.volume)
-
     @mock.patch.object(mirror_class,
-                       'replication_failover')
-    @mock.patch.object(mirror_class,
-                       'volume_replication_setup')
-    def test_storwize_replication_failover(self, rep_setup,
-                                           replication_failover):
+                       'failover_volume_host')
+    def test_storwize_failover_host(self, failover_volume_host):
         fake_secondary = 'svc_id_target'
-        self.svc_driver.replication_failover(self.ctxt, self.volume,
-                                             fake_secondary)
-        replication_failover.assert_called_once_with(self.ctxt, self.volume,
-                                                     fake_secondary)
-
-    @mock.patch.object(mirror_class,
-                       'list_replication_targets')
-    def test_storwize_list_replication_targets(self, list_targets):
-        fake_targets = [{"managed_backend_name": "second_host@sv2#sv2",
-                         "type": "managed",
-                         "target_device_id": "svc_id_target",
-                         "pool_name": "cinder_target"}]
-        list_targets.return_value = fake_targets
-        expected_resp = {'targets': fake_targets,
-                         'volume_id': self.volume['id']}
-        targets = self.svc_driver.list_replication_targets(self.ctxt,
-                                                           self.volume)
-        list_targets.assert_called_once_with(self.ctxt, self.volume)
-        self.assertEqual(expected_resp, targets)
+        target_id, volume_list = self.svc_driver.failover_host(self.ctxt,
+                                                               self.volumes,
+                                                               fake_secondary)
+        expected_list = [{'updates': {'replication_status': 'failed-over'},
+                          'volume_id': self.fake_volume_id}]
+
+        expected_calls = [mock.call(self.ctxt, self.volume,
+                                    fake_secondary)]
+        failover_volume_host.assert_has_calls(expected_calls)
+        self.assertEqual(fake_secondary, target_id)
+        self.assertEqual(expected_list, volume_list)
 
     @mock.patch.object(mirror_class,
                        '_partnership_validate_create')
@@ -4944,85 +4918,110 @@ class StorwizeSVCReplicationMirrorTestCase(test.TestCase):
         partnership_validate_create.assert_has_calls(expected_calls)
 
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'create_relationship')
+                       'switch_relationship')
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'get_system_info')
+                       'get_relationship_info')
+    def test_failover_volume_host(self, get_relationship_info,
+                                  switch_relationship):
+        fake_vol = {'id': '21345678-1234-5678-1234-567812345683'}
+        context = mock.Mock
+        secondary = 'svc_id_target'
+        get_relationship_info.return_value = (
+            {'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678',
+             'name': 'RC_name'})
+        self.driver.failover_volume_host(context, fake_vol, secondary)
+        get_relationship_info.assert_called_once_with(fake_vol)
+        switch_relationship.assert_called_once_with('RC_name')
+
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'create_vdisk')
+                       'switch_relationship')
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'get_vdisk_params')
+                       'get_relationship_info')
+    def test_failover_volume_host_relation_error(self, get_relationship_info,
+                                                 switch_relationship):
+        fake_vol = {'id': '21345678-1234-5678-1234-567812345683'}
+        context = mock.Mock
+        get_relationship_info.side_effect = Exception
+        secondary = 'svc_id_target'
+        self.assertRaises(exception.VolumeDriverException,
+                          self.driver.failover_volume_host,
+                          context, fake_vol, secondary)
+
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'get_vdisk_attributes')
+                       'switch_relationship')
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                        'get_relationship_info')
-    def test_replication_enable(self, get_relationship_info,
-                                get_vdisk_attributes,
-                                get_vdisk_params,
-                                create_vdisk,
-                                get_system_info,
-                                create_relationship):
-        fake_system = 'fake_system'
-        fake_params = mock.Mock()
-        get_relationship_info.return_value = None
-        get_vdisk_attributes.return_value = None
-        get_vdisk_params.return_value = fake_params
-        get_system_info.return_value = {'system_name': fake_system}
-        model_update = self.driver.replication_enable(self.ctxt,
-                                                      self.volume)
-        get_relationship_info.assert_called_once_with(self.volume)
-        get_vdisk_attributes.assert_called_once_with(self.volume['name'])
-        create_vdisk.assert_called_once_with(self.volume['name'],
-                                             '10', 'gb', 'cinder_target',
-                                             fake_params)
-        create_relationship.assert_called_once_with(self.volume['name'],
-                                                    self.volume['name'],
-                                                    fake_system,
-                                                    self.driver.asyncmirror)
-        self.assertEqual({'replication_status': 'enabled'}, model_update)
+    def test_failover_volume_host_switch_error(self, get_relationship_info,
+                                               switch_relationship):
+        fake_vol = {'id': '21345678-1234-5678-1234-567812345683'}
+        context = mock.Mock
+        secondary = 'svc_id_target'
+        get_relationship_info.return_value = (
+            {'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678',
+             'RC_name': 'RC_name'})
+        switch_relationship.side_effect = Exception
+        self.assertRaises(exception.VolumeDriverException,
+                          self.driver.failover_volume_host,
+                          context, fake_vol, secondary)
 
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'delete_vdisk')
+                       'switch_relationship')
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'delete_relationship')
+                       'get_relationship_info')
+    def test_failover_volume_host_backend_mismatch(self,
+                                                   get_relationship_info,
+                                                   switch_relationship):
+        fake_vol = {'id': '21345678-1234-5678-1234-567812345683'}
+        context = mock.Mock
+        secondary = 'wrong_id'
+        get_relationship_info.return_value = (
+            {'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678',
+             'RC_name': 'RC_name'})
+        updates = self.driver.failover_volume_host(context, fake_vol,
+                                                   secondary)
+        self.assertFalse(get_relationship_info.called)
+        self.assertFalse(switch_relationship.called)
+        self.assertIsNone(updates)
+
+    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                       'switch_relationship')
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                        'get_relationship_info')
-    def test_replication_disable(self, get_relationship_info,
-                                 delete_relationship,
-                                 delete_vdisk):
-        fake_target_vol_name = 'fake_target_vol_name'
-        get_relationship_info.return_value = {'aux_vdisk_name':
-                                              fake_target_vol_name}
-        model_update = self.driver.replication_disable(self.ctxt,
-                                                       self.volume)
-        delete_relationship.assert_called_once_with(self.volume['name'])
-        delete_vdisk.assert_called_once_with(fake_target_vol_name,
-                                             False)
-        self.assertEqual({'replication_status': 'disabled'}, model_update)
+    def test_replication_failback(self, get_relationship_info,
+                                  switch_relationship):
+        fake_vol = mock.Mock()
+        get_relationship_info.return_value = {'id': 'rel_id',
+                                              'name': 'rc_name'}
+        self.driver.replication_failback(fake_vol)
+        get_relationship_info.assert_called_once_with(fake_vol)
+        switch_relationship.assert_called_once_with('rc_name', aux=False)
 
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'delete_relationship')
+                       'get_relationship_info')
+    def test_get_relationship_status_valid(self, get_relationship_info):
+        fake_vol = mock.Mock()
+        get_relationship_info.return_value = {'state': 'synchronized'}
+        status = self.driver.get_relationship_status(fake_vol)
+        get_relationship_info.assert_called_once_with(fake_vol)
+        self.assertEqual('synchronized', status)
+
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                        'get_relationship_info')
-    def test_replication_failover(self, get_relationship_info,
-                                  delete_relationship):
-        secondary = 'svc_id_target'
-        fake_id = '546582b2-bafb-43cc-b765-bd738ab148c8'
-        expected_model_update = {'host': 'second_host@sv2#sv2',
-                                 '_name_id': fake_id}
-        fake_name = 'volume-' + fake_id
-        get_relationship_info.return_value = {'aux_vdisk_name':
-                                              fake_name}
-        model_update = self.driver.replication_failover(self.ctxt,
-                                                        self.volume,
-                                                        secondary)
-        delete_relationship.assert_called_once_with(self.volume['name'])
-        self.assertEqual(expected_model_update, model_update)
-
-    def test_list_replication_targets(self):
-        fake_targets = [{'target_device_id': 'svc_id_target'}]
-        targets = self.driver.list_replication_targets(self.ctxt,
-                                                       self.volume)
-        self.assertEqual(fake_targets, targets)
+    def test_get_relationship_status_none(self, get_relationship_info):
+        fake_vol = mock.Mock()
+        get_relationship_info.return_value = None
+        status = self.driver.get_relationship_status(fake_vol)
+        get_relationship_info.assert_called_once_with(fake_vol)
+        self.assertIsNone(status)
+
+    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                       'get_relationship_info')
+    def test_get_relationship_status_exception(self, get_relationship_info):
+        fake_vol = {'id': 'vol-id'}
+        get_relationship_info.side_effect = exception.VolumeDriverException
+        status = self.driver.get_relationship_status(fake_vol)
+        get_relationship_info.assert_called_once_with(fake_vol)
+        self.assertIsNone(status)
 
 
 class StorwizeSVCReplicationMetroMirrorTestCase(
index 879c7028db02773b934970646a802e51b23571d3..2a1ffc9a9c39484c74364ec6cdaa3b0f2a25c8f5 100644 (file)
@@ -374,29 +374,19 @@ class StorwizeSVCReplicationGlobalMirror(
                 LOG.error(msg)
                 raise exception.VolumeDriverException(message=msg)
 
-    # #### Implementing V2 replication methods #### #
-    def replication_enable(self, context, vref):
+    def get_relationship_status(self, volume):
+        rel_info = {}
         try:
-            rel_info = self.driver._helpers.get_relationship_info(vref)
-        except Exception as e:
-            msg = (_('Failed to get remote copy information for %(volume)s '
-                     'due to %(err)s'), {'volume': vref['id'], 'err': e})
+            rel_info = self.target_helpers.get_relationship_info(volume)
+        except Exception:
+            msg = (_LE('Unable to access the Storwize back-end '
+                       'for volume %s.'), volume['id'])
             LOG.error(msg)
-            raise exception.VolumeDriverException(message=msg)
 
-        if not rel_info or not rel_info.get('aux_vdisk_name', None):
-            self.volume_replication_setup(context, vref)
+        return rel_info.get('state') if rel_info else None
 
-        model_update = {'replication_status': 'enabled'}
-        return model_update
-
-    def replication_disable(self, context, vref):
-        self.delete_target_volume(vref)
-        model_update = {'replication_status': 'disabled'}
-        return model_update
-
-    def replication_failover(self, context, vref, secondary):
-        if not self.target or self.target.get('target_device_id') != secondary:
+    def failover_volume_host(self, context, vref, secondary):
+        if not self.target or self.target.get('backend_id') != secondary:
             msg = _LE("A valid secondary target MUST be specified in order "
                       "to failover.")
             LOG.error(msg)
@@ -405,29 +395,42 @@ class StorwizeSVCReplicationGlobalMirror(
             # The admin can still issue another failover request. That is
             # why we tentatively put return None instead of raising an
             # exception.
-            return None
+            return
 
         try:
-            rel_info = self.driver._helpers.get_relationship_info(vref)
-            target_vol_name = rel_info.get('aux_vdisk_name')
-            target_vol_id = target_vol_name[-self.UUID_LEN:]
-            if rel_info:
-                self.driver._helpers.delete_relationship(vref['name'])
-            if target_vol_id == vref['id']:
-                target_vol_id = None
+            rel_info = self.target_helpers.get_relationship_info(vref)
         except Exception:
-            msg = (_('Unable to failover the replication for volume %s.'),
+            msg = (_('Unable to access the Storwize back-end for volume %s.'),
                    vref['id'])
             LOG.error(msg)
             raise exception.VolumeDriverException(message=msg)
 
-        model_update = {'host': self.target.get('managed_backend_name'),
-                        '_name_id': target_vol_id}
-        return model_update
+        if not rel_info:
+            msg = (_('Unable to get the replication relationship for volume '
+                     '%s.'),
+                   vref['id'])
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+        else:
+            try:
+                # Reverse the role of the primary and secondary volumes,
+                # because the secondary volume becomes the primary in the
+                # fail-over status.
+                self.target_helpers.switch_relationship(
+                    rel_info.get('name'))
+            except Exception as e:
+                msg = (_('Unable to fail-over the volume %(id)s to the '
+                         'secondary back-end, because the replication '
+                         'relationship is unable to switch: %(error)s'),
+                       {"id": vref['id'], "error": e})
+                LOG.error(msg)
+                raise exception.VolumeDriverException(message=msg)
 
-    def list_replication_targets(self, context, vref):
-        # For the mode of global mirror, there is only one replication target.
-        return [{'target_device_id': self.target.get('target_device_id')}]
+    def replication_failback(self, volume):
+        rel_info = self.target_helpers.get_relationship_info(volume)
+        if rel_info:
+            self.target_helpers.switch_relationship(rel_info.get('name'),
+                                                    aux=False)
 
 
 class StorwizeSVCReplicationMetroMirror(
index 83700c0d4cfde7fda91a841ce80a663f02cb9bd0..5bd6af51d3ae3114fd452ed6bb5fd3f4ba9479bc 100644 (file)
@@ -283,6 +283,12 @@ class StorwizeSSH(object):
         ssh_cmd = ['svctask', 'rmrcrelationship', relationship]
         self.run_ssh_assert_no_output(ssh_cmd)
 
+    def switchrelationship(self, relationship, aux=True):
+        primary = 'aux' if aux else 'master'
+        ssh_cmd = ['svctask', 'switchrcrelationship', '-primary',
+                   primary, relationship]
+        self.run_ssh_assert_no_output(ssh_cmd)
+
     def startrcrelationship(self, rc_rel, primary=None):
         ssh_cmd = ['svctask', 'startrcrelationship', '-force']
         if primary:
@@ -1510,6 +1516,9 @@ class StorwizeHelpers(object):
         relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
         return relationship[0] if len(relationship) > 0 else None
 
+    def switch_relationship(self, relationship, aux=True):
+        self.ssh.switchrelationship(relationship, aux)
+
     def get_partnership_info(self, system_name):
         partnership = self.ssh.lspartnership(system_name)
         return partnership[0] if len(partnership) > 0 else None
@@ -1843,18 +1852,21 @@ class StorwizeSVCCommonDriver(san.SanDriver,
           FC and iSCSI within the StorwizeSVCCommonDriver class
     2.1 - Added replication V2 support to the global/metro mirror
           mode
+    2.1.1 - Update replication to version 2.1
     """
 
-    VERSION = "2.1"
+    VERSION = "2.1.1"
     VDISKCOPYOPS_INTERVAL = 600
 
     GLOBAL = 'global'
     METRO = 'metro'
     VALID_REP_TYPES = (GLOBAL, METRO)
+    FAILBACK_VALUE = 'default'
 
     def __init__(self, *args, **kwargs):
         super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
         self.configuration.append_config_values(storwize_svc_opts)
+        self._backend_name = self.configuration.safe_get('volume_backend_name')
         self._helpers = StorwizeHelpers(self._run_ssh)
         self._vdiskcopyops = {}
         self._vdiskcopyops_loop = None
@@ -1868,6 +1880,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                        'system_id': None,
                        'code_level': None,
                        }
+        self._active_backend_id = kwargs.get('active_backend_id')
 
         # Since there are three replication modes supported by Storwize,
         # this dictionary is used to map the replication types to certain
@@ -2427,46 +2440,115 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                                                copy_op[1])
         LOG.debug("Exit: update volume copy status.")
 
-    # #### V2 replication methods #### #
-    def replication_enable(self, context, vref):
-        """Enable replication on a replication capable volume."""
-        rep_type = self._validate_volume_rep_type(context, vref)
-        if rep_type not in self.replications:
-            msg = _("Driver does not support re-enabling replication for a "
-                    "failed over volume.")
-            LOG.error(msg)
-            raise exception.ReplicationError(volume_id=vref['id'],
-                                             reason=msg)
-        return self.replications.get(rep_type).replication_enable(
-            context, vref)
-
-    def replication_disable(self, context, vref):
-        """Disable replication on a replication capable volume."""
-        rep_type = self._validate_volume_rep_type(context, vref)
-        return self.replications[rep_type].replication_disable(
-            context, vref)
-
-    def replication_failover(self, context, vref, secondary):
+    # #### V2.1 replication methods #### #
+    def failover_host(self, context, volumes, secondary_id=None):
         """Force failover to a secondary replication target."""
-        rep_type = self._validate_volume_rep_type(context, vref)
-        return self.replications[rep_type].replication_failover(
-            context, vref, secondary)
-
-    def list_replication_targets(self, context, vref):
-        """Return the list of replication targets for a volume."""
-        rep_type = self._validate_volume_rep_type(context, vref)
-
-        # When a volume is failed over, the secondary volume driver will not
-        # have replication configured, so in this case, gracefully handle
-        # request by returning no target volumes
-        if rep_type not in self.replications:
-            targets = []
-        else:
-            targets = self.replications[rep_type].list_replication_targets(
-                context, vref)
+        self._validate_replication_enabled()
+        if self.FAILBACK_VALUE == secondary_id:
+            # In this case the administrator would like to fail back.
+            volume_update_list = self._replication_failback(context,
+                                                            volumes)
+            return None, volume_update_list
+
+        # In this case the administrator would like to fail over.
+        failover_target = None
+        for target in self._replication_targets:
+            if target['backend_id'] == secondary_id:
+                failover_target = target
+                break
+        if not failover_target:
+            msg = _("A valid secondary target MUST be specified in order "
+                    "to failover.")
+            LOG.error(msg)
+            raise exception.InvalidReplicationTarget(reason=msg)
 
-        return {'volume_id': vref['id'],
-                'targets': targets}
+        target_id = failover_target['backend_id']
+        volume_update_list = []
+        for volume in volumes:
+            rep_type = self._get_volume_replicated_type(context, volume)
+            if rep_type:
+                replication = self.replications.get(rep_type)
+                if replication.target.get('backend_id') == target_id:
+                    # Check if the target backend matches the replication type.
+                    # If so, fail over the volume.
+                    try:
+                        replication.failover_volume_host(context,
+                                                         volume, target_id)
+                        volume_update_list.append(
+                            {'volume_id': volume['id'],
+                             'updates': {'replication_status': 'failed-over'}})
+                    except exception.VolumeDriverException:
+                        msg = (_LE('Unable to failover to the secondary. '
+                                   'Please make sure that the secondary '
+                                   'back-end is ready.'))
+                        LOG.error(msg)
+                        volume_update_list.append(
+                            {'volume_id': volume['id'],
+                             'updates': {'replication_status': 'error'}})
+            else:
+                # If the volume is not of replicated type, we need to
+                # force the status into error state so a user knows they
+                # do not have access to the volume.
+                volume_update_list.append(
+                    {'volume_id': volume['id'],
+                     'updates': {'status': 'error'}})
+
+        return target_id, volume_update_list
+
+    def _is_host_ready_for_failback(self, ctxt, volumes):
+        valid_sync_status = ('consistent_synchronized', 'consistent_stopped',
+                             'synchronized', 'idling')
+        # Check the status of each volume to see if it is in
+        # a consistent status.
+        for volume in volumes:
+            rep_type = self._get_volume_replicated_type(ctxt, volume)
+            if rep_type:
+                replication = self.replications.get(rep_type)
+                if replication:
+                    status = replication.get_relationship_status(volume)
+                    # We need to make sure of that all the volumes are
+                    # in the valid status to trigger a successful
+                    # fail-back. False will be be returned even if only
+                    # one volume is not ready.
+                    if status not in valid_sync_status:
+                        return False
+                else:
+                    return False
+            else:
+                return False
+        return True
+
+    def _replication_failback(self, ctxt, volumes):
+        """Fail back all the volume on the secondary backend."""
+        if not self._is_host_ready_for_failback(ctxt, volumes):
+            msg = _("The host is not ready to be failed back. Please "
+                    "resynchronize the volumes and resume replication on the "
+                    "Storwize backends.")
+            LOG.error(msg)
+            raise exception.VolumeDriverException(data=msg)
+
+        volume_update_list = []
+        for volume in volumes:
+            rep_type = self._get_volume_replicated_type(ctxt, volume)
+            if rep_type:
+                replication = self.replications.get(rep_type)
+                replication.replication_failback(volume)
+                volume_update_list.append(
+                    {'volume_id': volume['id'],
+                     'updates': {'replication_status': 'available'}})
+            else:
+                volume_update_list.append(
+                    {'volume_id': volume['id'],
+                     'updates': {'status': 'available'}})
+
+        return volume_update_list
+
+    def _validate_replication_enabled(self):
+        if not self._replication_enabled:
+            msg = _("Issuing a fail-over failed because replication is "
+                    "not properly configured.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
 
     def _validate_volume_rep_type(self, ctxt, volume):
         rep_type = self._get_volume_replicated_type(ctxt, volume)
@@ -2535,8 +2617,8 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                 remote_array['replication_mode'] = rep_mode
                 remote_array['san_ip'] = (
                     dev.get('san_ip'))
-                remote_array['target_device_id'] = (
-                    dev.get('target_device_id'))
+                remote_array['backend_id'] = (
+                    dev.get('backend_id'))
                 remote_array['san_login'] = (
                     dev.get('san_login'))
                 remote_array['san_password'] = (
@@ -2564,7 +2646,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                                'successfully established partnership '
                                'with the replica Storwize target %(stor)s.'),
                            {'type': rep_type,
-                            'stor': target['target_device_id']})
+                            'stor': target['backend_id']})
                     LOG.error(msg)
                     continue
 
@@ -3021,6 +3103,8 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         data['pools'] = [self._build_pool_stats(pool)
                          for pool in
                          self.configuration.storwize_svc_volpool_name]
+        data['replication_enabled'] = self._replication_enabled
+        data['replication_targets'] = self._get_replication_targets()
         self._stats = data
 
     def _build_pool_stats(self, pool):
@@ -3056,6 +3140,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                 pool_stats.update({
                     'replication_enabled': self._replication_enabled,
                     'replication_type': self._supported_replication_types,
+                    'replication_targets': self._get_replication_targets(),
                     'replication_count': len(self._replication_targets)
                 })
             elif self.replication:
@@ -3067,6 +3152,9 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 
         return pool_stats
 
+    def _get_replication_targets(self):
+        return [target['backend_id'] for target in self._replication_targets]
+
     def _manage_input_check(self, ref):
         """Verify the input of manage function."""
         # Check that the reference is valid
index 10f05446042054bd852e436af10cdba5438a6eac..3d26ad55a38baba9b925974b9ca66888ff0f6eb1 100644 (file)
@@ -80,9 +80,12 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver):
     2.0 - Code refactor, split init file and placed shared methods for
           FC and iSCSI within the StorwizeSVCCommonDriver class
     2.0.1 - Added support for multiple pools with model update
+    2.1 - Added replication V2 support to the global/metro mirror
+          mode
+    2.1.1 - Update replication to version 2.1
     """
 
-    VERSION = "2.0.1"
+    VERSION = "2.1.1"
 
     def __init__(self, *args, **kwargs):
         super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs)
index dbea147267d70d08f20665bd213687cbdfa088d3..c475bd968f6d7aa3f0092f481b21e7b274fc9057 100644 (file)
@@ -80,9 +80,12 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver):
     2.0 - Code refactor, split init file and placed shared methods for
           FC and iSCSI within the StorwizeSVCCommonDriver class
     2.0.1 - Added support for multiple pools with model update
+    2.1 - Added replication V2 support to the global/metro mirror
+          mode
+    2.1.1 - Update replication to version 2.1
     """
 
-    VERSION = "2.0.1"
+    VERSION = "2.1.1"
 
     def __init__(self, *args, **kwargs):
         super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs)
diff --git a/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml b/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml
new file mode 100644 (file)
index 0000000..a5fa136
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Added replication v2.1 support to the IBM Storwize driver.
\ No newline at end of file
diff --git a/releasenotes/notes/storwize-v2-replication-mirror-managed-50c1b2996790760e.yaml b/releasenotes/notes/storwize-v2-replication-mirror-managed-50c1b2996790760e.yaml
deleted file mode 100644 (file)
index 552bfe9..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Adds managed v2 replication global and metro mirror modes support to the IBM Storwize driver.