review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fixes migrate_volume_completion
author git-harry <git-harry@live.co.uk>
Sat, 19 Jul 2014 10:25:29 +0000 (11:25 +0100)
committer git-harry <git-harry@live.co.uk>
Wed, 30 Jul 2014 12:35:13 +0000 (13:35 +0100)
The following issues are addressed:

- The source volume is never deleted when a migration completes,
  because status_update was assumed to always be a dict.

- Unattached volumes are set to in-use when retyped.

- In-use migrated volumes are left in the 'attaching' status.

Change-Id: Ib5034679a5f469ba53e6280a47e8c94d633cf911
Partial-Bug: 1316079
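
For background on the first issue: initialising status_update to None
means every consumer must truthiness-guard it before indexing, and a
missed guard either raises TypeError or silently skips a code path. A
minimal sketch of the two patterns (illustrative only, not code from
this commit):

    # Illustrative sketch -- not code from this commit.
    # With None, direct indexing raises TypeError, so every use
    # needs an explicit guard:
    status_update = None
    if status_update and status_update['status'] == 'in-use':
        pass  # guard required on every access

    # With an empty dict, .get() degrades safely to None:
    status_update = {}
    if status_update.get('status') == 'in-use':
        pass  # no extra guard needed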

cinder/tests/test_volume.py
cinder/volume/manager.py

diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index 57851d4ecd1b8e758c523dfb78a7e2c15e4cd6f7..1417726b6dd9fe4588ec33a3512356ca274e8d82 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -43,6 +43,7 @@ from cinder import keymgr
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import importutils
 from cinder.openstack.common import jsonutils
+from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder.openstack.common import units
 import cinder.policy
@@ -2552,6 +2553,9 @@ class VolumeTestCase(BaseVolumeTestCase):
                        lambda x, y, z, remote='dest': True)
         self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume',
                        fake_delete_volume_rpc)
+        error_logs = []
+        LOG = logging.getLogger('cinder.volume.manager')
+        self.stubs.Set(LOG, 'error', lambda x: error_logs.append(x))
 
         volume = tests_utils.create_volume(self.context, size=0,
                                            host=CONF.host)
@@ -2561,6 +2565,61 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume = db.volume_get(context.get_admin_context(), volume['id'])
         self.assertEqual(volume['host'], 'newhost')
         self.assertIsNone(volume['migration_status'])
+        self.assertEqual(volume['status'], 'available')
+        self.assertEqual(error_logs, [])
+
+    def _test_migrate_volume_completion(self, status='available',
+                                        instance_uuid=None, attached_host=None,
+                                        retyping=False):
+        elevated = context.get_admin_context()
+        initial_status = retyping and 'retyping' or status
+        old_volume = tests_utils.create_volume(self.context, size=0,
+                                               host=CONF.host,
+                                               status=initial_status,
+                                               migration_status='migrating',
+                                               instance_uuid=instance_uuid,
+                                               attached_host=attached_host)
+        target_status = 'target:%s' % old_volume['id']
+        new_volume = tests_utils.create_volume(self.context, size=0,
+                                               host=CONF.host,
+                                               migration_status=target_status)
+
+        self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume',
+                       lambda x: None)
+        self.stubs.Set(volume_rpcapi.VolumeAPI, 'attach_volume',
+                       lambda *args: self.volume.attach_volume(args[1],
+                                                               args[2]['id'],
+                                                               *args[3:]))
+        self.stubs.Set(self.volume.driver, 'attach_volume',
+                       lambda *args, **kwargs: None)
+
+        self.volume.migrate_volume_completion(self.context,
+                                              old_volume['id'],
+                                              new_volume['id'])
+
+        volume = db.volume_get(elevated, old_volume['id'])
+        self.assertEqual(volume['status'], status)
+        self.assertEqual(volume['attached_host'], attached_host)
+        self.assertEqual(volume['instance_uuid'], instance_uuid)
+
+    def test_migrate_volume_completion_retype_available(self):
+        self._test_migrate_volume_completion('available', retyping=True)
+
+    def test_migrate_volume_completion_retype_in_use(self):
+        self._test_migrate_volume_completion(
+            'in-use',
+            '83c969d5-065e-4c9c-907d-5394bc2e98e2',
+            'some-host',
+            retyping=True)
+
+    def test_migrate_volume_completion_migrate_available(self):
+        self._test_migrate_volume_completion()
+
+    def test_migrate_volume_completion_migrate_in_use(self):
+        self._test_migrate_volume_completion(
+            'in-use',
+            '83c969d5-065e-4c9c-907d-5394bc2e98e2',
+            'some-host')
 
     def test_retype_setup_fail_volume_is_available(self):
         """Verify volume is still available if retype prepare failed."""
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 1f4433dcfb4c5361a8f250090b45ee116db0e5fb..740c7820462437ea52512875c0e43b536b2ef8c8 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -998,7 +998,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         new_volume = self.db.volume_get(ctxt, new_volume_id)
         rpcapi = volume_rpcapi.VolumeAPI()
 
-        status_update = None
+        status_update = {}
         if volume['status'] == 'retyping':
             status_update = {'status': self._get_original_status(volume)}
 
@@ -1020,7 +1020,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         # Delete the source volume (if it fails, don't fail the migration)
         try:
-            if status_update and status_update['status'] == 'in-use':
+            if status_update.get('status') == 'in-use':
                 self.detach_volume(ctxt, volume_id)
             self.delete_volume(ctxt, volume_id)
         except Exception as ex:
@@ -1029,14 +1029,14 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
         self.db.volume_destroy(ctxt, new_volume_id)
-        if status_update:
+        if status_update.get('status') == 'in-use':
             updates = {'migration_status': 'completing'}
             updates.update(status_update)
         else:
             updates = {'migration_status': None}
         self.db.volume_update(ctxt, volume_id, updates)
 
-        if status_update:
+        if 'in-use' in (status_update.get('status'), volume['status']):
             rpcapi.attach_volume(ctxt,
                                  volume,
                                  volume['instance_uuid'],
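
Putting the manager changes together, the post-patch status handling
can be summarised in a standalone sketch (names are illustrative, not
the real VolumeManager API):

    def finish_migration_updates(status_update, old_volume_status):
        # Mirrors the post-patch branches in migrate_volume_completion.
        if status_update.get('status') == 'in-use':
            # Retyped in-use volume: restore its original status and
            # keep migration_status at 'completing' until the re-attach
            # finishes.
            updates = {'migration_status': 'completing'}
            updates.update(status_update)
        else:
            # Available volume (retyped or plainly migrated): the
            # migration is simply finished.
            updates = {'migration_status': None}
        # Re-attach when the volume was in-use before a retype OR is
        # in-use during a plain migration.
        reattach = 'in-use' in (status_update.get('status'),
                                old_volume_status)
        return updates, reattach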