backup_manager_opts = [
cfg.StrOpt('backup_driver',
default='cinder.backup.drivers.swift',
- help='Driver to use for backups.',)
+ help='Driver to use for backups.',),
+ cfg.BoolOpt('backup_service_inithost_offload',
+ default=False,
+                help='Offload pending backup deletes during '
+                     'backup service startup.',),
]
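
For context, a minimal self-contained sketch of how the new flag would be registered and read, assuming oslo_config (the library behind the cfg.StrOpt/cfg.BoolOpt calls above); only the option name and default come from the patch, everything else is illustrative:

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([
        cfg.BoolOpt('backup_service_inithost_offload',
                    default=False,
                    help='Offload pending backup deletes during '
                         'backup service startup.'),
    ])

    # Once registered, the flag is read as a plain attribute, which is how
    # the backup manager can branch on it during init_host().
    if CONF.backup_service_inithost_offload:
        print('pending backup deletes will be offloaded to a threadpool')

Operators would enable it in cinder.conf under [DEFAULT] with backup_service_inithost_offload = True.
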
# This map doesn't need to be extended in the future since it's only
backup.save()
if backup['status'] == 'deleting':
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
- self.delete_backup(ctxt, backup)
+ if CONF.backup_service_inithost_offload:
+ # Offload all the pending backup delete operations to the
+ # threadpool to prevent the main backup service thread
+ # from being blocked.
+ self._add_to_threadpool(self.delete_backup, ctxt, backup)
+ else:
+ # By default, delete backups sequentially
+ self.delete_backup(ctxt, backup)
self._cleanup_temp_volumes_snapshots(backups)
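
The comment above captures the intent: with the flag enabled, each pending delete is handed to a greenthread pool so init_host() can finish quickly, while the default path keeps the old sequential behaviour. A condensed, hypothetical sketch of that decision (pending_backups and delete_backup are stand-ins, not the real manager methods):

    from eventlet import greenpool

    def delete_backup(backup):
        # Stand-in for BackupManager.delete_backup(ctxt, backup).
        print('deleting backup %s' % backup)

    def resume_pending_deletes(pending_backups, offload=False):
        pool = greenpool.GreenPool()
        for backup in pending_backups:
            if offload:
                # Fire-and-forget: the delete runs on its own greenthread.
                pool.spawn_n(delete_backup, backup)
            else:
                # Default: delete in-line, blocking service startup.
                delete_backup(backup)

    resume_pending_deletes(['b1', 'b2'], offload=True)
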
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
+from eventlet import greenpool
+
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
self.last_capabilities = None
self.service_name = service_name
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+ self._tp = greenpool.GreenPool()
super(SchedulerDependentManager, self).__init__(host, db_driver)
def update_service_capabilities(self, capabilities):
self.service_name,
self.host,
self.last_capabilities)
+
+ def _add_to_threadpool(self, func, *args, **kwargs):
+ self._tp.spawn_n(func, *args, **kwargs)
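
The helper is a thin wrapper over eventlet's GreenPool.spawn_n, which schedules the call on a greenthread and returns immediately with no result object; the pool size (1000 by default) caps how many offloaded calls run at once. A tiny usage sketch, outside of any Cinder class:

    from eventlet import greenpool

    pool = greenpool.GreenPool()  # defaults to 1000 greenthreads
    pool.spawn_n(print, 'runs on a greenthread; the return value is discarded')
    pool.waitall()                # drain the pool before exiting
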
self.assertTrue(mock_delete_volume.called)
self.assertTrue(mock_delete_snapshot.called)
+ @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
+ @mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
+ def test_init_host_with_service_inithost_offload(self,
+ mock_add_threadpool,
+ mock_get_all_by_host):
+ self.override_config('backup_service_inithost_offload', True)
+ vol1_id = self._create_volume_db_entry()
+ db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
+ backup1 = self._create_backup_db_entry(status='deleting',
+ volume_id=vol1_id)
+
+ vol2_id = self._create_volume_db_entry()
+ db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
+ backup2 = self._create_backup_db_entry(status='deleting',
+ volume_id=vol2_id)
+ mock_get_all_by_host.return_value = [backup1, backup2]
+ self.backup_mgr.init_host()
+ calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
+ mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
+ mock_add_threadpool.assert_has_calls(calls, any_order=True)
+ self.assertEqual(2, mock_add_threadpool.call_count)
+
@mock.patch.object(db, 'volume_get')
@ddt.data(KeyError, exception.VolumeNotFound)
def test_cleanup_temp_volumes_snapshots(self,
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
-from eventlet import greenpool
-
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
- self._tp = greenpool.GreenPool()
self.stats = {}
if not volume_driver:
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
- def _add_to_threadpool(self, func, *args, **kwargs):
- self._tp.spawn_n(func, *args, **kwargs)
-
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None: