From bc4a60d742bc537d2b2eebc6b754182a09baa79d Mon Sep 17 00:00:00 2001 From: "Walter A. Boring IV" Date: Thu, 1 Oct 2015 16:04:04 -0700 Subject: [PATCH] Ensure replication functions check driver status This patch adds the missing checks to both enable_replication and disable_replication to ensure the driver is initialized. This patch also adds comment blocks to the failover_replication as well as list_replication_targets functions to explain why they both are not checking the driver for initialization. One of the main use cases for those 2 functions happens precisely when the driver can't talk to the primary backend, but might be able to talk to the secondary or target backends. Change-Id: I4052e0ff4d2977ecfba940db56dba24c063bbbb6 Closes-Bug: #1501936 --- cinder/tests/unit/test_volume.py | 28 ++++++++++++++++++++++++++++ cinder/volume/manager.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py index 5000f2698..ed1f832b4 100644 --- a/cinder/tests/unit/test_volume.py +++ b/cinder/tests/unit/test_volume.py @@ -6268,6 +6268,20 @@ class GenericVolumeDriverTestCase(DriverTestCase): volume_api.enable_replication(ctxt, volume) self.assertTrue(mock_enable_rep.called) + def test_enable_replication_driver_initialized(self): + volume = tests_utils.create_volume(self.context, + size=1, + host=CONF.host, + replication_status='enabling') + # set initialized to False + self.volume.driver._initialized = False + + # start test + self.assertRaises(exception.DriverNotInitialized, + self.volume.enable_replication, + self.context, + volume) + def test_disable_replication_invalid_state(self): volume_api = cinder.volume.api.API() ctxt = context.get_admin_context() @@ -6298,6 +6312,20 @@ class GenericVolumeDriverTestCase(DriverTestCase): volume_api.disable_replication(ctxt, volume) self.assertTrue(mock_disable_rep.called) + def test_disable_replication_driver_initialized(self): + 
volume = tests_utils.create_volume(self.context, + size=1, + host=CONF.host, + replication_status='disabling') + # set initialized to False + self.volume.driver._initialized = False + + # start test + self.assertRaises(exception.DriverNotInitialized, + self.volume.disable_replication, + self.context, + volume) + @mock.patch.object(utils, 'brick_get_connector_properties') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 7dc47d9a3..aa70d39d8 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -3083,6 +3083,14 @@ class VolumeManager(manager.SchedulerDependentManager): :param context: security context :param volume: volume object returned by DB """ + try: + # If the driver isn't initialized, we can't talk to the backend + # so the driver can't enable replication + utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Can't enable replication because the " + "driver isn't initialized")) # NOTE(jdg): We're going to do fresh get from the DB and verify that # we are in an expected state ('enabling') @@ -3101,6 +3109,7 @@ class VolumeManager(manager.SchedulerDependentManager): err_msg = (_("Enable replication for volume failed.")) LOG.exception(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) + try: if rep_driver_data: volume = self.db.volume_update(context, @@ -3125,6 +3134,14 @@ class VolumeManager(manager.SchedulerDependentManager): :param context: security context :param volume: volume object returned by DB """ + try: + # If the driver isn't initialized, we can't talk to the backend + # so the driver can't disable replication + utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized: + with excutils.save_and_reraise_exception(): + 
LOG.error(_LE("Can't disable replication because the " + "driver isn't initialized")) volume = self.db.volume_get(context, volume['id']) if volume['replication_status'] != 'disabling': @@ -3175,6 +3192,13 @@ class VolumeManager(manager.SchedulerDependentManager): :param volume: volume object returned by DB :param secondary: Specifies rep target to fail over to """ + # NOTE(hemna) We intentionally don't enforce the driver being + # initialized here, because the primary might actually be down, + # but we still want to give the driver a chance of doing some work + # against the target. It's entirely up to the driver to deal with + # not being able to talk to the primary array that it's configured + # to manage. + try: volume = self.db.volume_get(context, volume['id']) model_update = self.driver.replication_failover(context, @@ -3257,6 +3281,12 @@ class VolumeManager(manager.SchedulerDependentManager): passwords or sensitive information. """ + # NOTE(hemna) We intentionally don't enforce the driver being + # initialized here, because the primary might actually be down, + # but we still want to give the driver a chance of doing some work + # against the target. It's entirely up to the driver to deal with + # not being able to talk to the primary array that it's configured + # to manage. try: volume = self.db.volume_get(context, volume['id']) -- 2.45.2