review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Ensure replication functions check driver status
author     Walter A. Boring IV <walter.boring@hp.com>
           Thu, 1 Oct 2015 23:04:04 +0000 (16:04 -0700)
committer  Walter A. Boring IV (hemna) <walter.boring@hpe.com>
           Thu, 1 Oct 2015 23:44:08 +0000 (23:44 +0000)
This patch adds the missing checks to ensure
the driver is initialized in both
enable_replication and disable_replication.
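
For reference, utils.require_driver_initialized() is the guard used
throughout the volume manager; below is a minimal, simplified sketch of
what it is assumed to do (the real helper lives in cinder/utils.py and the
exception in cinder/exception.py -- behaviour is assumed, not copied
verbatim):

    # Simplified stand-ins for cinder.exception.DriverNotInitialized and
    # cinder.utils.require_driver_initialized; illustrative only.
    class DriverNotInitialized(Exception):
        pass


    def require_driver_initialized(driver):
        # The base volume driver is assumed to expose an 'initialized'
        # flag (backed by self._initialized, which the tests below flip
        # to False); raise if the driver has not finished its setup.
        if not getattr(driver, 'initialized', False):
            raise DriverNotInitialized()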

This patch also adds comment blocks to the
failover_replication and list_replication_targets
functions to explain why neither of them checks
the driver for initialization.  One of the main
use cases for those two functions happens precisely
when the driver can't talk to the primary backend
but might be able to talk to the secondary or
target backends.
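
To make that rationale concrete, here is a hypothetical, self-contained
sketch (not taken from this patch; BackendClient and its method names are
illustrative only) of a failover path that still completes while the
primary backend is unreachable:

    class BackendUnreachable(Exception):
        pass


    class BackendClient(object):
        """Toy stand-in for a storage backend's management API."""

        def __init__(self, reachable):
            self.reachable = reachable

        def call(self, op, volume_id):
            if not self.reachable:
                raise BackendUnreachable(volume_id)
            return '%s %s' % (op, volume_id)


    def replication_failover(primary, secondary, volume_id):
        # Deliberately no "is the driver initialized?" guard here: the
        # primary being down is exactly the situation failover exists for.
        try:
            primary.call('demote', volume_id)   # best effort only
        except BackendUnreachable:
            pass                                # expected when failing over
        return secondary.call('promote', volume_id)


    # Primary unreachable, secondary reachable -- failover still completes.
    print(replication_failover(BackendClient(False), BackendClient(True), 'vol-1'))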

Change-Id: I4052e0ff4d2977ecfba940db56dba24c063bbbb6
Closes-Bug: #1501936

cinder/tests/unit/test_volume.py
cinder/volume/manager.py

index 5000f26981a442caf8d9cef4edf8dc0d429693ba..ed1f832b45fdbb5b2ac0033bad302f00619eec68 100644 (file)
@@ -6268,6 +6268,20 @@ class GenericVolumeDriverTestCase(DriverTestCase):
             volume_api.enable_replication(ctxt, volume)
             self.assertTrue(mock_enable_rep.called)
 
+    def test_enable_replication_driver_initialized(self):
+        volume = tests_utils.create_volume(self.context,
+                                           size=1,
+                                           host=CONF.host,
+                                           replication_status='enabling')
+        # set initialized to False
+        self.volume.driver._initialized = False
+
+        # start test
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.enable_replication,
+                          self.context,
+                          volume)
+
     def test_disable_replication_invalid_state(self):
         volume_api = cinder.volume.api.API()
         ctxt = context.get_admin_context()
@@ -6298,6 +6312,20 @@ class GenericVolumeDriverTestCase(DriverTestCase):
             volume_api.disable_replication(ctxt, volume)
             self.assertTrue(mock_disable_rep.called)
 
+    def test_disable_replication_driver_initialized(self):
+        volume = tests_utils.create_volume(self.context,
+                                           size=1,
+                                           host=CONF.host,
+                                           replication_status='disabling')
+        # set initialized to False
+        self.volume.driver._initialized = False
+
+        # start test
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.disable_replication,
+                          self.context,
+                          volume)
+
     @mock.patch.object(utils, 'brick_get_connector_properties')
     @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
     @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
index 7dc47d9a394234fd7854cdebdf3e9a96da0e0501..aa70d39d83b15bc5c47a2ef380480cef340d5252 100644 (file)
@@ -3083,6 +3083,14 @@ class VolumeManager(manager.SchedulerDependentManager):
         :param context: security context
         :param volume: volume object returned by DB
         """
+        try:
+            # If the driver isn't initialized, we can't talk to the backend
+            # so the driver can't enable replication
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Can't enable replication because the "
+                              "driver isn't initialized"))
 
         # NOTE(jdg): We're going to do fresh get from the DB and verify that
         # we are in an expected state ('enabling')
@@ -3101,6 +3109,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             err_msg = (_("Enable replication for volume failed."))
             LOG.exception(err_msg, resource=volume)
             raise exception.VolumeBackendAPIException(data=err_msg)
+
         try:
             if rep_driver_data:
                 volume = self.db.volume_update(context,
@@ -3125,6 +3134,14 @@ class VolumeManager(manager.SchedulerDependentManager):
         :param context: security context
         :param volume: volume object returned by DB
         """
+        try:
+            # If the driver isn't initialized, we can't talk to the backend
+            # so the driver can't disable replication
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Can't disable replication because the "
+                              "driver isn't initialized"))
 
         volume = self.db.volume_get(context, volume['id'])
         if volume['replication_status'] != 'disabling':
@@ -3175,6 +3192,13 @@ class VolumeManager(manager.SchedulerDependentManager):
         :param volume: volume object returned by DB
         :param secondary: Specifies rep target to fail over to
         """
+        # NOTE(hemna) We intentionally don't enforce the driver being
+        # initialized here, because the primary might actually be down,
+        # but we still want to give the driver a chance to do some work
+        # against the target.  It's entirely up to the driver to deal with
+        # not being able to talk to the primary array that it's configured
+        # to manage.
+
         try:
             volume = self.db.volume_get(context, volume['id'])
             model_update = self.driver.replication_failover(context,
@@ -3257,6 +3281,12 @@ class VolumeManager(manager.SchedulerDependentManager):
         passwords or sensitive information.
 
         """
+        # NOTE(hemna) We intentionally don't enforce the driver being
+        # initialized here, because the primary might actually be down,
+        # but we still want to give the driver a chance to do some work
+        # against the target.  It's entirely up to the driver to deal with
+        # not being able to talk to the primary array that it's configured
+        # to manage.
 
         try:
             volume = self.db.volume_get(context, volume['id'])