review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Enhance deletion efficiency when backup init host
author     wanghao <wanghao749@huawei.com>
           Tue, 28 Jul 2015 08:30:39 +0000 (16:30 +0800)
committer  wanghao <wanghao749@huawei.com>
           Tue, 4 Aug 2015 03:19:00 +0000 (11:19 +0800)
Currently, during backup init_host, if a backup's status is
'deleting', cinder deletes those backups sequentially, so a slow
deletion blocks the main backup service thread.

As is already done for volume deletion, a thread pool can be used to
offload the pending backup delete operations.

Add CONF.backup_service_inithost_offload to control whether the
thread pool is used.

DocImpact
Implements: blueprint enhance-deletion-efficiency-when-backup-init-host

Change-Id: If9d9e454cdd48669db6e4b8d96f340bbb6f99fad
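
For operators, enabling the new behaviour is a one-line change in
cinder.conf. A minimal sketch, assuming the option lands in the
DEFAULT group (backup_manager_opts is registered there):

    [DEFAULT]
    # Offload pending backup deletes to a thread pool at startup.
    # Defaults to False, i.e. sequential deletion.
    backup_service_inithost_offload = True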

cinder/backup/manager.py
cinder/manager.py
cinder/tests/unit/test_backup.py
cinder/volume/manager.py

index e9449ba34be1beeebb89252a6954127bfddf81de..f5a1029d9c244e3393bb44b521f8c2cdff99ebde 100644 (file)
@@ -57,7 +57,11 @@ LOG = logging.getLogger(__name__)
 backup_manager_opts = [
     cfg.StrOpt('backup_driver',
                default='cinder.backup.drivers.swift',
-               help='Driver to use for backups.',)
+               help='Driver to use for backups.',),
+    cfg.BoolOpt('backup_service_inithost_offload',
+                default=False,
+                help='Offload pending backup delete during '
+                     'backup service startup.',),
 ]
 
 # This map doesn't need to be extended in the future since it's only
@@ -251,7 +255,14 @@ class BackupManager(manager.SchedulerDependentManager):
                 backup.save()
             if backup['status'] == 'deleting':
                 LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
-                self.delete_backup(ctxt, backup)
+                if CONF.backup_service_inithost_offload:
+                    # Offload all the pending backup delete operations to the
+                    # threadpool to prevent the main backup service thread
+                    # from being blocked.
+                    self._add_to_threadpool(self.delete_backup, ctxt, backup)
+                else:
+                    # By default, delete backups sequentially
+                    self.delete_backup(ctxt, backup)
 
         self._cleanup_temp_volumes_snapshots(backups)
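
The non-blocking behaviour comes from eventlet green threads:
spawn_n schedules the call and returns immediately. A minimal,
self-contained sketch of the pattern (illustrative only, not cinder
code):

    from eventlet import greenpool

    def delete_backup(backup_id):
        # Stand-in for a potentially slow delete operation.
        print('deleting backup %s' % backup_id)

    pool = greenpool.GreenPool()
    for backup_id in ('backup-1', 'backup-2'):
        # spawn_n returns None right away; the work runs on a green
        # thread, so the caller (here, init_host) is not blocked.
        pool.spawn_n(delete_backup, backup_id)
    pool.waitall()  # block until all spawned green threads finish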
 
index 021b40dcf8826f8281d3e1306e0e8c50393c29dc..be9de3e7e6fd73842ac14b45a39db5cad8a71745 100644 (file)
@@ -61,6 +61,8 @@ from cinder.db import base
 from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 
+from eventlet import greenpool
+
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -140,6 +142,7 @@ class SchedulerDependentManager(Manager):
         self.last_capabilities = None
         self.service_name = service_name
         self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+        self._tp = greenpool.GreenPool()
         super(SchedulerDependentManager, self).__init__(host, db_driver)
 
     def update_service_capabilities(self, capabilities):
@@ -156,3 +159,6 @@ class SchedulerDependentManager(Manager):
                 self.service_name,
                 self.host,
                 self.last_capabilities)
+
+    def _add_to_threadpool(self, func, *args, **kwargs):
+        self._tp.spawn_n(func, *args, **kwargs)
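
Hoisting _tp and _add_to_threadpool from VolumeManager into
SchedulerDependentManager (see the removals in cinder/volume/manager.py
below) lets any subclass, including BackupManager, share the helper.
Note that spawn_n discards the function's return value, and exceptions
raised in the offloaded function cannot be caught by the caller, so
offloaded work must handle its own errors. A hypothetical subclass
sketch (_pending_deletes and _slow_delete are invented for
illustration):

    class MyManager(SchedulerDependentManager):
        def init_host(self):
            for item in self._pending_deletes():  # hypothetical
                # Fire-and-forget: errors in _slow_delete do not
                # propagate back to init_host.
                self._add_to_threadpool(self._slow_delete, item)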
index 3acb4ad2b1b2546e107314ffb77d441991c69669..e1a0100d2550747cb1de0b898bdd78b03242ebf3 100644 (file)
@@ -228,6 +228,28 @@ class BackupTestCase(BaseBackupTest):
         self.assertTrue(mock_delete_volume.called)
         self.assertTrue(mock_delete_snapshot.called)
 
+    @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
+    @mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
+    def test_init_host_with_service_inithost_offload(self,
+                                                     mock_add_threadpool,
+                                                     mock_get_all_by_host):
+        self.override_config('backup_service_inithost_offload', True)
+        vol1_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
+        backup1 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol1_id)
+
+        vol2_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
+        backup2 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol2_id)
+        mock_get_all_by_host.return_value = [backup1, backup2]
+        self.backup_mgr.init_host()
+        calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
+                 mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
+        mock_add_threadpool.assert_has_calls(calls, any_order=True)
+        self.assertEqual(2, mock_add_threadpool.call_count)
+
     @mock.patch.object(db, 'volume_get')
     @ddt.data(KeyError, exception.VolumeNotFound)
     def test_cleanup_temp_volumes_snapshots(self,
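
To run just the new test, something like the following should work
(a sketch; the tox environment name and test-runner invocation depend
on the tree's tox.ini):

    tox -e py27 -- cinder.tests.unit.test_backup.BackupTestCase.test_init_host_with_service_inithost_offload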
index a231e2f6304dee353244b83ed5fcf48d803d7dd0..dd5e41af93a0e4e3cf0ad6ee852e84b9bce1e3ce 100644 (file)
@@ -69,8 +69,6 @@ from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import utils as vol_utils
 from cinder.volume import volume_types
 
-from eventlet import greenpool
-
 LOG = logging.getLogger(__name__)
 
 QUOTAS = quota.QUOTAS
@@ -200,7 +198,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                                             *args, **kwargs)
         self.configuration = config.Configuration(volume_manager_opts,
                                                   config_group=service_name)
-        self._tp = greenpool.GreenPool()
         self.stats = {}
 
         if not volume_driver:
@@ -234,9 +231,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                 LOG.error(_LE("Invalid JSON: %s"),
                           self.driver.configuration.extra_capabilities)
 
-    def _add_to_threadpool(self, func, *args, **kwargs):
-        self._tp.spawn_n(func, *args, **kwargs)
-
     def _count_allocated_capacity(self, ctxt, volume):
         pool = vol_utils.extract_host(volume['host'], 'pool')
         if pool is None: