From 0faff1f6aca23920df2c1042b1831268ea2ae5c3 Mon Sep 17 00:00:00 2001
From: Tom Barron
Date: Wed, 5 Aug 2015 17:19:37 -0600
Subject: [PATCH] Fix backup init_host volume cleanup

The backup service manager's init_host method cleans up leftover
volumes from interrupted backup and restore operations. As the code is
currently written, it only does this for volumes with attachments.

This commit runs the volume cleanup for volumes in backing-up or
restoring-backup state even when they do not have attachments.

Closes-bug: 1482548
Change-Id: I753c72b7adcdf57745deafaad7de616eb6243aa9
---
 cinder/backup/manager.py         | 56 +++++++++++++-------------------
 cinder/tests/unit/test_backup.py | 13 +++++++-
 2 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index e9449ba34..1aaf054d8 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -200,39 +200,21 @@ class BackupManager(manager.SchedulerDependentManager):
         for volume in volumes:
             volume_host = volume_utils.extract_host(volume['host'], 'backend')
             backend = self._get_volume_backend(host=volume_host)
-            attachments = volume['volume_attachment']
-            if attachments:
-                if (volume['status'] == 'backing-up' and
-                        volume['previous_status'] == 'available'):
-                    LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-                                 'status %(status)s (was backing-up).'),
-                             {'vol_id': volume['id'],
-                              'status': volume['previous_status']})
-                    mgr = self._get_manager(backend)
-                    for attachment in attachments:
-                        if (attachment['attached_host'] == self.host and
-                                attachment['instance_uuid'] is None):
-                            mgr.detach_volume(ctxt, volume['id'],
-                                              attachment['id'])
-                elif (volume['status'] == 'backing-up' and
-                        volume['previous_status'] == 'in-use'):
-                    LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-                                 'status %(status)s (was backing-up).'),
-                             {'vol_id': volume['id'],
-                              'status': volume['previous_status']})
-                    self.db.volume_update(ctxt, volume['id'],
-                                          volume['previous_status'])
-                elif volume['status'] == 'restoring-backup':
-                    LOG.info(_LI('setting volume %s to error_restoring '
-                                 '(was restoring-backup).'), volume['id'])
-                    mgr = self._get_manager(backend)
-                    for attachment in attachments:
-                        if (attachment['attached_host'] == self.host and
-                                attachment['instance_uuid'] is None):
-                            mgr.detach_volume(ctxt, volume['id'],
-                                              attachment['id'])
-                    self.db.volume_update(ctxt, volume['id'],
-                                          {'status': 'error_restoring'})
+            mgr = self._get_manager(backend)
+            if volume['status'] == 'backing-up':
+                self._detach_all_attachments(ctxt, mgr, volume)
+                LOG.info(_LI('Resetting volume %(vol_id)s to previous '
+                             'status %(status)s (was backing-up).'),
+                         {'vol_id': volume['id'],
+                          'status': volume['previous_status']})
+                self.db.volume_update(ctxt, volume['id'],
+                                      {'status': volume['previous_status']})
+            elif volume['status'] == 'restoring-backup':
+                self._detach_all_attachments(ctxt, mgr, volume)
+                LOG.info(_LI('setting volume %s to error_restoring '
+                             '(was restoring-backup).'), volume['id'])
+                self.db.volume_update(ctxt, volume['id'],
+                                      {'status': 'error_restoring'})
 
         # TODO(smulcahy) implement full resume of backup and restore
         # operations on restart (rather than simply resetting)
@@ -255,6 +237,14 @@ class BackupManager(manager.SchedulerDependentManager):
 
         self._cleanup_temp_volumes_snapshots(backups)
 
+    def _detach_all_attachments(self, ctxt, mgr, volume):
+        attachments = volume['volume_attachment'] or []
+        for attachment in attachments:
+            if (attachment['attached_host'] == self.host and
+                    attachment['instance_uuid'] is None):
+                mgr.detach_volume(ctxt, volume['id'],
+                                  attachment['id'])
+
     def _cleanup_temp_volumes_snapshots(self, backups):
         # NOTE(xyang): If the service crashes or gets restarted during the
         # backup operation, there could be temporary volumes or snapshots
diff --git a/cinder/tests/unit/test_backup.py b/cinder/tests/unit/test_backup.py
index 5276010b1..60646bc3f 100644
--- a/cinder/tests/unit/test_backup.py
+++ b/cinder/tests/unit/test_backup.py
@@ -193,10 +193,13 @@ class BackupTestCase(BaseBackupTest):
         temp_vol_id = self._create_volume_db_entry()
         db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
         vol5_id = self._create_volume_db_entry()
-        db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
+        db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
         temp_snap = self._create_snapshot_db_entry()
         temp_snap.status = 'available'
         temp_snap.save()
+        vol6_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol6_id, {'status': 'restoring-backup'})
+
         backup1 = self._create_backup_db_entry(status='creating',
                                                volume_id=vol1_id)
         backup2 = self._create_backup_db_entry(status='restoring',
@@ -215,6 +218,14 @@ class BackupTestCase(BaseBackupTest):
         self.assertEqual('available', vol1['status'])
         vol2 = db.volume_get(self.ctxt, vol2_id)
         self.assertEqual('error_restoring', vol2['status'])
+        vol3 = db.volume_get(self.ctxt, vol3_id)
+        self.assertEqual('available', vol3['status'])
+        vol4 = db.volume_get(self.ctxt, vol4_id)
+        self.assertEqual('available', vol4['status'])
+        vol5 = db.volume_get(self.ctxt, vol5_id)
+        self.assertEqual('available', vol5['status'])
+        vol6 = db.volume_get(self.ctxt, vol6_id)
+        self.assertEqual('error_restoring', vol6['status'])
         backup1 = db.backup_get(self.ctxt, backup1.id)
         self.assertEqual('error', backup1['status'])
-- 
2.45.2
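
For readers who want to poke at the new control flow outside the Cinder
tree, below is a minimal standalone sketch of what init_host now does.
BackupCleanup and the plain-dict volumes are illustrative stand-ins, not
Cinder APIs; the real code updates status through the Cinder DB API and
detaches through per-backend volume managers.

# Standalone sketch of the cleanup flow this patch introduces.
# BackupCleanup is a hypothetical stand-in for BackupManager.
class BackupCleanup(object):
    def __init__(self, host):
        self.host = host

    def reset_volumes(self, volumes):
        # Runs for every leftover volume, attached or not; the bug was
        # that the old code skipped volumes with no attachments.
        for volume in volumes:
            if volume['status'] == 'backing-up':
                self._detach_all_attachments(volume)
                volume['status'] = volume['previous_status']
            elif volume['status'] == 'restoring-backup':
                self._detach_all_attachments(volume)
                volume['status'] = 'error_restoring'

    def _detach_all_attachments(self, volume):
        # 'or []' turns a missing attachment list into a no-op loop,
        # so the status reset above still runs.
        for attachment in volume['volume_attachment'] or []:
            if (attachment['attached_host'] == self.host and
                    attachment['instance_uuid'] is None):
                attachment['detached'] = True

if __name__ == '__main__':
    cleanup = BackupCleanup(host='backup-host-1')
    vols = [
        # No attachments: previously skipped, now reset to 'available'.
        {'status': 'backing-up', 'previous_status': 'available',
         'volume_attachment': None},
        # Attached on this host with no instance: detached, then failed.
        {'status': 'restoring-backup',
         'volume_attachment': [{'attached_host': 'backup-host-1',
                                'instance_uuid': None}]},
    ]
    cleanup.reset_volumes(vols)
    assert vols[0]['status'] == 'available'
    assert vols[1]['status'] == 'error_restoring'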