]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix backup init_host volume cleanup
authorTom Barron <tpb@dyncloud.net>
Wed, 5 Aug 2015 23:19:37 +0000 (17:19 -0600)
committerTom Barron <tpb@dyncloud.net>
Fri, 14 Aug 2015 11:38:34 +0000 (11:38 +0000)
The backup service manager's init_host method cleans up leftover volumes
from interrupted backup and restore operations.  As currently written,
it does this only for volumes that have attachments.

This commit runs the volume cleanup for volumes in the 'backing-up' or
'restoring-backup' state even when they do not have attachments.

Closes-bug: 1482548
Change-Id: I753c72b7adcdf57745deafaad7de616eb6243aa9

cinder/backup/manager.py
cinder/tests/unit/test_backup.py

index e9449ba34be1beeebb89252a6954127bfddf81de..1aaf054d89ba005bd90f2dd6353cd141cfbfbfa8 100644 (file)
@@ -200,39 +200,21 @@ class BackupManager(manager.SchedulerDependentManager):
         for volume in volumes:
             volume_host = volume_utils.extract_host(volume['host'], 'backend')
             backend = self._get_volume_backend(host=volume_host)
-            attachments = volume['volume_attachment']
-            if attachments:
-                if (volume['status'] == 'backing-up' and
-                        volume['previous_status'] == 'available'):
-                    LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-                                 'status %(status)s (was backing-up).'),
-                             {'vol_id': volume['id'],
-                              'status': volume['previous_status']})
-                    mgr = self._get_manager(backend)
-                    for attachment in attachments:
-                        if (attachment['attached_host'] == self.host and
-                           attachment['instance_uuid'] is None):
-                            mgr.detach_volume(ctxt, volume['id'],
-                                              attachment['id'])
-                elif (volume['status'] == 'backing-up' and
-                        volume['previous_status'] == 'in-use'):
-                    LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-                                 'status %(status)s (was backing-up).'),
-                             {'vol_id': volume['id'],
-                              'status': volume['previous_status']})
-                    self.db.volume_update(ctxt, volume['id'],
-                                          volume['previous_status'])
-                elif volume['status'] == 'restoring-backup':
-                    LOG.info(_LI('setting volume %s to error_restoring '
-                                 '(was restoring-backup).'), volume['id'])
-                    mgr = self._get_manager(backend)
-                    for attachment in attachments:
-                        if (attachment['attached_host'] == self.host and
-                           attachment['instance_uuid'] is None):
-                            mgr.detach_volume(ctxt, volume['id'],
-                                              attachment['id'])
-                    self.db.volume_update(ctxt, volume['id'],
-                                          {'status': 'error_restoring'})
+            mgr = self._get_manager(backend)
+            if volume['status'] == 'backing-up':
+                self._detach_all_attachments(ctxt, mgr, volume)
+                LOG.info(_LI('Resetting volume %(vol_id)s to previous '
+                             'status %(status)s (was backing-up).'),
+                         {'vol_id': volume['id'],
+                          'status': volume['previous_status']})
+                self.db.volume_update(ctxt, volume['id'],
+                                      {'status': volume['previous_status']})
+            elif volume['status'] == 'restoring-backup':
+                self._detach_all_attachments(ctxt, mgr, volume)
+                LOG.info(_LI('setting volume %s to error_restoring '
+                             '(was restoring-backup).'), volume['id'])
+                self.db.volume_update(ctxt, volume['id'],
+                                      {'status': 'error_restoring'})
 
         # TODO(smulcahy) implement full resume of backup and restore
         # operations on restart (rather than simply resetting)
@@ -255,6 +237,14 @@ class BackupManager(manager.SchedulerDependentManager):
 
         self._cleanup_temp_volumes_snapshots(backups)
 
+    def _detach_all_attachments(self, ctxt, mgr, volume):
+        attachments = volume['volume_attachment'] or []
+        for attachment in attachments:
+            if (attachment['attached_host'] == self.host and
+                    attachment['instance_uuid'] is None):
+                        mgr.detach_volume(ctxt, volume['id'],
+                                          attachment['id'])
+
     def _cleanup_temp_volumes_snapshots(self, backups):
         # NOTE(xyang): If the service crashes or gets restarted during the
         # backup operation, there could be temporary volumes or snapshots
index 5276010b1bd483773935426a0d95f9a4c4354a06..60646bc3fff3c38baa45e9d4c1eb460218d56955 100644 (file)
@@ -193,10 +193,13 @@ class BackupTestCase(BaseBackupTest):
         temp_vol_id = self._create_volume_db_entry()
         db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
         vol5_id = self._create_volume_db_entry()
-        db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
+        db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
         temp_snap = self._create_snapshot_db_entry()
         temp_snap.status = 'available'
         temp_snap.save()
+        vol6_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol6_id, {'status': 'restoring-backup'})
+
         backup1 = self._create_backup_db_entry(status='creating',
                                                volume_id=vol1_id)
         backup2 = self._create_backup_db_entry(status='restoring',
@@ -215,6 +218,14 @@ class BackupTestCase(BaseBackupTest):
         self.assertEqual('available', vol1['status'])
         vol2 = db.volume_get(self.ctxt, vol2_id)
         self.assertEqual('error_restoring', vol2['status'])
+        vol3 = db.volume_get(self.ctxt, vol3_id)
+        self.assertEqual('available', vol3['status'])
+        vol4 = db.volume_get(self.ctxt, vol4_id)
+        self.assertEqual('available', vol4['status'])
+        vol5 = db.volume_get(self.ctxt, vol5_id)
+        self.assertEqual('available', vol5['status'])
+        vol6 = db.volume_get(self.ctxt, vol6_id)
+        self.assertEqual('error_restoring', vol6['status'])
 
         backup1 = db.backup_get(self.ctxt, backup1.id)
         self.assertEqual('error', backup1['status'])