From c81249107ffd5fc8fed2c6438b9521892789363f Mon Sep 17 00:00:00 2001
From: wanghao
Date: Tue, 28 Jul 2015 16:30:39 +0800
Subject: [PATCH] Enhance deletion efficiency when backup init host

Currently, when the backup service initializes the host, backups whose
status is 'deleting' are deleted sequentially, so a slow deletion blocks
the backup service thread.

Similar to volume deletion, a threadpool can be used to offload all the
pending backup delete operations.

Add CONF.backup_service_inithost_offload to specify whether the
threadpool is used.

DocImpact
Implements: blueprint enhance-deletion-efficiency-when-backup-init-host
Change-Id: If9d9e454cdd48669db6e4b8d96f340bbb6f99fad
---
 cinder/backup/manager.py         | 15 +++++++++++++--
 cinder/manager.py                |  6 ++++++
 cinder/tests/unit/test_backup.py | 22 ++++++++++++++++++++++
 cinder/volume/manager.py         |  6 ------
 4 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index e9449ba34..f5a1029d9 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -57,7 +57,11 @@ LOG = logging.getLogger(__name__)
 backup_manager_opts = [
     cfg.StrOpt('backup_driver',
                default='cinder.backup.drivers.swift',
-               help='Driver to use for backups.',)
+               help='Driver to use for backups.',),
+    cfg.BoolOpt('backup_service_inithost_offload',
+                default=False,
+                help='Offload pending backup delete during '
+                     'backup service startup.',),
 ]
 
 # This map doesn't need to be extended in the future since it's only
@@ -251,7 +255,14 @@ class BackupManager(manager.SchedulerDependentManager):
                 backup.save()
             if backup['status'] == 'deleting':
                 LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
-                self.delete_backup(ctxt, backup)
+                if CONF.backup_service_inithost_offload:
+                    # Offload all the pending backup delete operations to the
+                    # threadpool to prevent the main backup service thread
+                    # from being blocked.
+                    self._add_to_threadpool(self.delete_backup, ctxt, backup)
+                else:
+                    # By default, delete backups sequentially
+                    self.delete_backup(ctxt, backup)
 
         self._cleanup_temp_volumes_snapshots(backups)
 
diff --git a/cinder/manager.py b/cinder/manager.py
index 021b40dcf..be9de3e7e 100644
--- a/cinder/manager.py
+++ b/cinder/manager.py
@@ -61,6 +61,8 @@ from cinder.db import base
 from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 
+from eventlet import greenpool
+
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -140,6 +142,7 @@ class SchedulerDependentManager(Manager):
         self.last_capabilities = None
         self.service_name = service_name
         self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+        self._tp = greenpool.GreenPool()
         super(SchedulerDependentManager, self).__init__(host, db_driver)
 
     def update_service_capabilities(self, capabilities):
@@ -156,3 +159,6 @@ class SchedulerDependentManager(Manager):
                                                        self.service_name,
                                                        self.host,
                                                        self.last_capabilities)
+
+    def _add_to_threadpool(self, func, *args, **kwargs):
+        self._tp.spawn_n(func, *args, **kwargs)
diff --git a/cinder/tests/unit/test_backup.py b/cinder/tests/unit/test_backup.py
index 3acb4ad2b..e1a0100d2 100644
--- a/cinder/tests/unit/test_backup.py
+++ b/cinder/tests/unit/test_backup.py
@@ -228,6 +228,28 @@ class BackupTestCase(BaseBackupTest):
         self.assertTrue(mock_delete_volume.called)
         self.assertTrue(mock_delete_snapshot.called)
 
+    @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
+    @mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
+    def test_init_host_with_service_inithost_offload(self,
+                                                     mock_add_threadpool,
+                                                     mock_get_all_by_host):
+        self.override_config('backup_service_inithost_offload', True)
+        vol1_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
+        backup1 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol1_id)
+
+        vol2_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
+        backup2 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol2_id)
+        mock_get_all_by_host.return_value = [backup1, backup2]
+        self.backup_mgr.init_host()
+        calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
+                 mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
+        mock_add_threadpool.assert_has_calls(calls, any_order=True)
+        self.assertEqual(2, mock_add_threadpool.call_count)
+
     @mock.patch.object(db, 'volume_get')
     @ddt.data(KeyError, exception.VolumeNotFound)
     def test_cleanup_temp_volumes_snapshots(self,
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index a231e2f63..dd5e41af9 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -69,8 +69,6 @@ from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import utils as vol_utils
 from cinder.volume import volume_types
 
-from eventlet import greenpool
-
 LOG = logging.getLogger(__name__)
 
 QUOTAS = quota.QUOTAS
@@ -200,7 +198,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                                             *args, **kwargs)
         self.configuration = config.Configuration(volume_manager_opts,
                                                   config_group=service_name)
-        self._tp = greenpool.GreenPool()
         self.stats = {}
 
         if not volume_driver:
@@ -234,9 +231,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             LOG.error(_LE("Invalid JSON: %s"),
                       self.driver.configuration.extra_capabilities)
 
-    def _add_to_threadpool(self, func, *args, **kwargs):
-        self._tp.spawn_n(func, *args, **kwargs)
-
     def _count_allocated_capacity(self, ctxt, volume):
         pool = vol_utils.extract_host(volume['host'], 'pool')
         if pool is None:
-- 
2.45.2
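
For readers unfamiliar with the pattern, below is a minimal, self-contained sketch (not Cinder code) of the offload approach the patch adopts: pending delete work is handed to an eventlet GreenPool via spawn_n(), which returns immediately, so the thread initializing the host is not blocked by slow deletions. The backup IDs and the delete_backup() body are illustrative placeholders.

    import eventlet
    from eventlet import greenpool


    def delete_backup(backup_id):
        # Stand-in for a slow delete; eventlet.sleep() yields cooperatively
        # so other green threads can run while this one waits.
        eventlet.sleep(1)
        print('deleted backup %s' % backup_id)


    def init_host(pending_backups, offload=True):
        pool = greenpool.GreenPool()
        for backup_id in pending_backups:
            if offload:
                # spawn_n() queues the call on a green thread and returns
                # immediately; the return value and any exception are
                # discarded.
                pool.spawn_n(delete_backup, backup_id)
            else:
                # Sequential fallback, matching the pre-patch behaviour.
                delete_backup(backup_id)
        # Only needed in this standalone script so it waits for the
        # offloaded deletes before exiting; a long-lived service would not.
        pool.waitall()


    if __name__ == '__main__':
        init_host(['backup-1', 'backup-2', 'backup-3'])

Using spawn_n() rather than spawn() mirrors the _add_to_threadpool() helper added above: the caller only needs the work queued and never consumes the GreenThread handle or its result.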
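To opt in once the patch is applied, the new Boolean option would be set in the backup service's cinder.conf; a sketch, assuming the option is registered in the [DEFAULT] group like the rest of backup_manager_opts (it defaults to False, which keeps the sequential behaviour):

    [DEFAULT]
    backup_service_inithost_offload = True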