review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Garbage Remains when Attached Volume is Migrated with NFS Driver
author Rushil Chugh <rushil@netapp.com>
Mon, 8 Dec 2014 22:09:36 +0000 (17:09 -0500)
committer Rushil Chugh <rushil@netapp.com>
Mon, 12 Jan 2015 22:28:18 +0000 (22:28 +0000)
A garbage volume remained when an attached NFS volume was migrated
to another NFS backend, and an error message was generated because
the attached volume was not detached before being migrated. The
root cause is that status_update['status'] was only set for retype
operations and not for migration from one NFS backend to another,
so migrate_volume_completion never detached or deleted the source
volume. This patch derives the original volume status
unconditionally, fixing migration of attached volumes from one NFS
backend to another.

Change-Id: I657a46d98b1e2a52cec128308cbbf39166c3625b
Closes-bug: 1391172
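
For context, the fix replaces the retype-only status_update dict
with the manager's existing _get_original_status() helper. A
minimal sketch of that helper as it appears in Cinder's
VolumeManager around this release (simplified here; see
cinder/volume/manager.py for the authoritative version):

    def _get_original_status(self, volume):
        # A volume attached to an instance or a host is 'in-use';
        # anything else reverts to 'available'.
        if (volume['instance_uuid'] is None and
                volume['attached_host'] is None):
            return 'available'
        else:
            return 'in-use'

Because the status is now derived for every completion path, the
'in-use' branches in the diff below run for plain migrations as
well as retypes, so an attached source volume is detached before
it is deleted.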

cinder/tests/test_volume.py
cinder/volume/manager.py

index 5af42eef517d4e9d0fbaf32b413ceea7c094eb28..14298a3647c5c90afc0ee796b350c86368b2ebce 100644 (file)
@@ -2828,14 +2828,15 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.stubs.Set(self.volume.driver, 'attach_volume',
                        lambda *args, **kwargs: None)
 
-        self.volume.migrate_volume_completion(self.context,
-                                              old_volume['id'],
-                                              new_volume['id'])
+        with mock.patch.object(self.volume.driver, 'detach_volume') as detach:
+            self.volume.migrate_volume_completion(self.context,
+                                                  old_volume['id'],
+                                                  new_volume['id'])
 
         volume = db.volume_get(elevated, old_volume['id'])
         self.assertEqual(volume['status'], status)
         self.assertEqual(volume['attached_host'], attached_host)
         self.assertEqual(volume['instance_uuid'], instance_uuid)
+        self.assertEqual(status == 'in-use', detach.called)
 
     def test_migrate_volume_completion_retype_available(self):
         self._test_migrate_volume_completion('available', retyping=True)
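
The hunk above shows only the retype/available case; a companion
case along the same pattern would drive the new detach assertion
through the in-use path. A sketch, assuming the helper's signature
is _test_migrate_volume_completion(status, instance_uuid=None,
attached_host=None, retyping=False); the test name, parameter
names, and UUID below are illustrative, not taken from the commit:

    def test_migrate_volume_completion_migrate_in_use(self):
        self._test_migrate_volume_completion(
            'in-use',
            instance_uuid='83c969d5-065e-4c9c-907d-5394bc2e98e2',
            attached_host='some-host',
            retyping=False)

With status set to 'in-use', the assertion added above
(assertEqual(status == 'in-use', detach.called)) verifies that the
driver's detach_volume is actually invoked during completion.
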
index b930222661eaeba643b66ca4a7212a0bdba1b11c..1a868a52a203e770dd06d910e2bf54dad931e534 100644 (file)
@@ -1093,9 +1093,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         new_volume = self.db.volume_get(ctxt, new_volume_id)
         rpcapi = volume_rpcapi.VolumeAPI()
 
-        status_update = {}
-        if volume['status'] == 'retyping':
-            status_update = {'status': self._get_original_status(volume)}
+        orig_volume_status = self._get_original_status(volume)
 
         if error:
             msg = _("migrate_volume_completion is cleaning up an error "
@@ -1104,9 +1102,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                             'vol2': new_volume['id']})
             new_volume['migration_status'] = None
             rpcapi.delete_volume(ctxt, new_volume)
-            updates = {'migration_status': None}
-            if status_update:
-                updates.update(status_update)
+            updates = {'migration_status': None, 'status': orig_volume_status}
             self.db.volume_update(ctxt, volume_id, updates)
             return volume_id
 
@@ -1115,7 +1111,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         # Delete the source volume (if it fails, don't fail the migration)
         try:
-            if status_update.get('status') == 'in-use':
+            if orig_volume_status == 'in-use':
                 self.detach_volume(ctxt, volume_id)
             self.delete_volume(ctxt, volume_id)
         except Exception as ex:
@@ -1130,16 +1126,14 @@ class VolumeManager(manager.SchedulerDependentManager):
                                       new_volume)
         self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
         self.db.volume_destroy(ctxt, new_volume_id)
-        if status_update.get('status') == 'in-use':
-            updates = {'migration_status': 'completing'}
-            updates.update(status_update)
+        if orig_volume_status == 'in-use':
+            updates = {'migration_status': 'completing',
+                       'status': orig_volume_status}
         else:
             updates = {'migration_status': None}
         self.db.volume_update(ctxt, volume_id, updates)
 
-        if 'in-use' in (status_update.get('status'), volume['status']):
-            # NOTE(jdg): if we're passing the ref here, why are we
-            # also passing in the various fields from that ref?
+        if orig_volume_status == 'in-use':
             rpcapi.attach_volume(ctxt,
                                  volume,
                                  volume['instance_uuid'],