From fa6ab2483de36c447d199643eb1e5f7de6390165 Mon Sep 17 00:00:00 2001
From: Steven Kaufer
Date: Tue, 10 Feb 2015 21:12:16 +0000
Subject: [PATCH] Replication status periodic task optimization

A periodic task exists to update the replication status for all
volumes. Currently, this task executes for all drivers and always
retrieves all volumes for the current host from the DB.

This patch set:
* Ensures that the periodic task is only activated if the driver
  actually supports replication
* Only retrieves volumes from the DB if
  'replication_status' != 'disabled' in the periodic task

Also, the driver documentation in
cinder.volume.driver.VolumeDriver.get_volume_stats() is updated to
reflect that the 'replication' key indicates that the driver supports
replication; this is the key that was actually implemented in the
drivers that support replication.

Change-Id: I61fbc31567ad0b6908a00113adeaccf415343e8e
Closes-Bug: 1383524
---
 cinder/tests/test_volume.py | 61 +++++++++++++++++++++++++++++++++++++
 cinder/volume/driver.py     |  2 +-
 cinder/volume/manager.py    | 53 +++++++++++++++----------------
 3 files changed, 87 insertions(+), 29 deletions(-)

diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index e134ad711..fa813906c 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -303,6 +303,24 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.volume.delete_volume(self.context, vol3['id'])
         self.volume.delete_volume(self.context, vol4['id'])
 
+    @mock.patch.object(vol_manager.VolumeManager, 'add_periodic_task')
+    def test_init_host_repl_enabled_periodic_task(self, mock_add_p_task):
+        manager = vol_manager.VolumeManager()
+        with mock.patch.object(manager.driver,
+                               'get_volume_stats') as m_get_stats:
+            m_get_stats.return_value = {'replication': True}
+            manager.init_host()
+        mock_add_p_task.assert_called_once_with(mock.ANY)
+
+    @mock.patch.object(vol_manager.VolumeManager, 'add_periodic_task')
+    def test_init_host_repl_disabled_periodic_task(self, mock_add_p_task):
+        manager = vol_manager.VolumeManager()
+        with mock.patch.object(manager.driver,
+                               'get_volume_stats') as m_get_stats:
+            m_get_stats.return_value = {'replication': False}
+            manager.init_host()
+        self.assertEqual(0, mock_add_p_task.call_count)
+
     @mock.patch.object(QUOTAS, 'reserve')
     @mock.patch.object(QUOTAS, 'commit')
     @mock.patch.object(QUOTAS, 'rollback')
@@ -641,6 +659,49 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.CinderException,
                           vol_manager.VolumeManager)
 
+    @mock.patch.object(db, 'volume_get_all_by_host')
+    def test_update_replication_rel_status(self, m_get_by_host):
+        m_get_by_host.return_value = [mock.sentinel.vol]
+        ctxt = context.get_admin_context()
+        manager = vol_manager.VolumeManager()
+        with mock.patch.object(manager.driver,
+                               'get_replication_status') as m_get_rep_status:
+            m_get_rep_status.return_value = None
+            manager._update_replication_relationship_status(ctxt)
+        m_get_rep_status.assert_called_once_with(ctxt, mock.sentinel.vol)
+        exp_filters = {
+            'replication_status':
+            ['active', 'copying', 'error', 'active-stopped', 'inactive']}
+        m_get_by_host.assert_called_once_with(ctxt, manager.host,
+                                              filters=exp_filters)
+
+    @mock.patch.object(db, 'volume_get_all_by_host',
+                       mock.Mock(return_value=[{'id': 'foo'}]))
+    @mock.patch.object(db, 'volume_update')
+    def test_update_replication_rel_status_update_vol(self, mock_update):
+        """Volume is updated with replication update data."""
+        ctxt = context.get_admin_context()
+        manager = vol_manager.VolumeManager()
+        with mock.patch.object(manager.driver,
+                               'get_replication_status') as m_get_rep_status:
+            m_get_rep_status.return_value = mock.sentinel.model_update
+            manager._update_replication_relationship_status(ctxt)
+        mock_update.assert_called_once_with(ctxt, 'foo',
+                                            mock.sentinel.model_update)
+
+    @mock.patch.object(db, 'volume_get_all_by_host',
+                       mock.Mock(return_value=[{'id': 'foo'}]))
+    def test_update_replication_rel_status_with_repl_support_exc(self):
+        """Exception handled when raised getting replication status."""
+        ctxt = context.get_admin_context()
+        manager = vol_manager.VolumeManager()
+        manager.driver._initialized = True
+        manager.driver._stats['replication'] = True
+        with mock.patch.object(manager.driver,
+                               'get_replication_status') as m_get_rep_status:
+            m_get_rep_status.side_effect = Exception()
+            manager._update_replication_relationship_status(ctxt)
+
     def test_delete_busy_volume(self):
         """Test volume survives deletion if driver reports it as busy."""
         volume = tests_utils.create_volume(self.context, **self.volume_params)
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index 4114f9dd0..ae477dd4b 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -402,7 +402,7 @@ class BaseVD(object):
            True, run the update first.
 
           For replication the following state should be reported:
-           replication_support = True (None or false disables replication)
+           replication = True (None or false disables replication)
        """
        return None
 
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 5265b08ae..dd8c558ba 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -347,6 +347,16 @@ class VolumeManager(manager.SchedulerDependentManager):
         # collect and publish service capabilities
         self.publish_service_capabilities(ctxt)
 
+        # conditionally run replication status task
+        stats = self.driver.get_volume_stats(refresh=True)
+        if stats and stats.get('replication', False):
+
+            @periodic_task.periodic_task
+            def run_replication_task(self, ctxt):
+                self._update_replication_relationship_status(ctxt)
+
+            self.add_periodic_task(run_replication_task)
+
     def create_volume(self, context, volume_id, request_spec=None,
                       filter_properties=None, allow_reschedule=True,
                       snapshot_id=None, image_id=None, source_volid=None,
@@ -1620,36 +1630,23 @@ class VolumeManager(manager.SchedulerDependentManager):
             raise exception.ReplicationError(reason=err_msg,
                                              volume_id=volume_id)
 
-    @periodic_task.periodic_task
     def _update_replication_relationship_status(self, ctxt):
         LOG.info(_LI('Updating volume replication status.'))
-        if not self.driver.initialized:
-            if self.driver.configuration.config_group is None:
-                config_group = ''
-            else:
-                config_group = ('(config name %s)' %
-                                self.driver.configuration.config_group)
-
-            LOG.warning(_LW('Unable to update volume replication status, '
-                            '%(driver_name)s -%(driver_version)s '
-                            '%(config_group)s driver is uninitialized.') %
-                        {'driver_name': self.driver.__class__.__name__,
-                         'driver_version': self.driver.get_version(),
-                         'config_group': config_group})
-        else:
-            volumes = self.db.volume_get_all_by_host(ctxt, self.host)
-            for vol in volumes:
-                model_update = None
-                try:
-                    model_update = self.driver.get_replication_status(
-                        ctxt, vol)
-                    if model_update:
-                        self.db.volume_update(ctxt,
-                                              vol['id'],
-                                              model_update)
-                except Exception:
-                    LOG.exception(_LE("Error checking replication status for "
-                                      "volume %s") % vol['id'])
+        # Only want volumes that do not have a 'disabled' replication status
+        filters = {'replication_status': ['active', 'copying', 'error',
+                                          'active-stopped', 'inactive']}
+        volumes = self.db.volume_get_all_by_host(ctxt, self.host,
+                                                 filters=filters)
+        for vol in volumes:
+            model_update = None
+            try:
+                model_update = self.driver.get_replication_status(
+                    ctxt, vol)
+                if model_update:
+                    self.db.volume_update(ctxt, vol['id'], model_update)
+            except Exception:
+                LOG.exception(_LE("Error checking replication status for "
+                                  "volume %s") % vol['id'])
 
     def create_consistencygroup(self, context, group_id):
         """Creates the consistency group."""
-- 
2.45.2
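
As a usage illustration (not part of the patch), below is a minimal,
self-contained sketch of the conditional registration pattern that
init_host() now uses. FakeDriver and FakeManager are hypothetical
stand-ins for the Cinder driver and the VolumeManager periodic-task
plumbing; the sketch assumes only that the driver reports a boolean
'replication' key in its volume stats:

    class FakeDriver(object):
        """Reports a 'replication' key in its stats, like a real driver."""

        def __init__(self, supports_replication):
            self._stats = {'replication': supports_replication}

        def get_volume_stats(self, refresh=False):
            return self._stats


    class FakeManager(object):
        """Models only the periodic-task plumbing used by init_host()."""

        def __init__(self, driver):
            self.driver = driver
            self._periodic_tasks = []

        def add_periodic_task(self, task):
            self._periodic_tasks.append(task)

        def init_host(self):
            # Mirrors the patch: register the replication status task only
            # when the driver reports 'replication': True in its stats.
            stats = self.driver.get_volume_stats(refresh=True)
            if stats and stats.get('replication', False):
                self.add_periodic_task(lambda ctxt: None)  # placeholder task


    if __name__ == '__main__':
        enabled = FakeManager(FakeDriver(True))
        enabled.init_host()
        assert len(enabled._periodic_tasks) == 1

        disabled = FakeManager(FakeDriver(False))
        disabled.init_host()
        assert not disabled._periodic_tasks

Run with plain python; the asserts mirror the two new init_host tests:
the task list gains one entry when replication is supported and stays
empty otherwise, so drivers without replication never pay the cost of
the periodic DB query.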