From 128dacc6188d398dcd45ba4862243a8621f0d82f Mon Sep 17 00:00:00 2001 From: =?utf8?q?Micha=C5=82 Dulko?= Date: Wed, 16 Mar 2016 14:06:10 +0100 Subject: [PATCH] Fix compatibility mode of backup jobs scheduling While testing interoperability of Liberty and Mitaka services I've found two problems with backup jobs scheduling when running in non-scalable mode (this happens when we run mixed Liberty and Mitaka c-bak services, so during the live upgrade). First of all, when passing volume.host into scheduling we don't strip the backend and pool parts (@backend#pool) from it, so in a multi-backend environment scheduling doesn't match backup host names and that clue is silently ignored. The second problem is that we don't pass the target volume's host into the scheduling when restoring the volume. This means that we don't schedule the jobs exactly the old way and it may happen that Liberty's cinder-backup will receive a restore request with a target volume it doesn't have access to. This commit sorts both problems out. Please note that this is very low-risk, as it affects only this non-scalable mode, which without this is broken anyway. Change-Id: Ib7195f27a1e455732db8621cd09ba87daa5d5a33 Closes-Bug: 1558073 --- cinder/backup/api.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cinder/backup/api.py b/cinder/backup/api.py index c447859ef..bd159fa8a 100644 --- a/cinder/backup/api.py +++ b/cinder/backup/api.py @@ -200,6 +200,9 @@ class API(base.Base): # This snippet should go away in Newton. Note that volume_host # parameter will also be unnecessary then. 
if not self._is_scalable_only(): + if volume_host: + volume_host = volume_utils.extract_host(volume_host, + level='host') if volume_host and self._is_backup_service_enabled(az, volume_host): return volume_host @@ -444,7 +447,7 @@ class API(base.Base): # Setting the status here rather than setting at start and unrolling # for each error condition, it should be a very small window backup.host = self._get_available_backup_service_host( - backup.host, backup.availability_zone) + backup.host, backup.availability_zone, volume_host=volume.host) backup.status = fields.BackupStatus.RESTORING backup.restore_volume_id = volume.id backup.save() -- 2.45.2