review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add finish_volume_migration to volume object
author Thang Pham <thang.g.pham@gmail.com>
Tue, 15 Dec 2015 00:19:24 +0000 (16:19 -0800)
committer Thang Pham <thang.g.pham@gmail.com>
Fri, 8 Jan 2016 19:20:49 +0000 (19:20 +0000)
The following patch adds finish_volume_migration() to
the volume object so that it can be used by
migrate_volume_completion() in cinder/volume/manager.py
to properly delete the destination volume on failure.
It also optimizes the call to finish_volume_migration()
by using the existing volume objects instead of
performing additional volume lookups.

Change-Id: I02ddf2afb8643e4d3c68934627ad35193e0137ef
Closes-Bug: #1520102
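
For reference, a rough before/after sketch of the call site in
migrate_volume_completion(), condensed from the manager.py hunk below
(illustrative only, not a drop-in snippet):

    # Before: the db-layer helper re-fetched both volumes by id
    __, updated_new = self.db.finish_volume_migration(
        ctxt, volume.id, new_volume.id)

    # After: reuse the volume objects the manager already holds
    volume.refresh()
    new_volume.refresh()
    updated_new = volume.finish_volume_migration(new_volume)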

cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/objects/volume.py
cinder/tests/unit/db/test_finish_migration.py [deleted file]
cinder/tests/unit/objects/test_objects.py
cinder/tests/unit/objects/test_volume.py
cinder/volume/manager.py

index 76e9c352c63f14ee5a31c5a9518d6deeb31c187f..c157d2f79757aa9f602aa9e0a9eb7d9029e01ccf 100644 (file)
@@ -186,11 +186,6 @@ def volume_data_get_for_project(context, project_id):
     return IMPL.volume_data_get_for_project(context, project_id)
 
 
-def finish_volume_migration(context, src_vol_id, dest_vol_id):
-    """Perform database updates upon completion of volume migration."""
-    return IMPL.finish_volume_migration(context, src_vol_id, dest_vol_id)
-
-
 def volume_destroy(context, volume_id):
     """Destroy the volume or raise if it does not exist."""
     return IMPL.volume_destroy(context, volume_id)
index 46c3718ab61efddb3356dad53f7fd8193b15b753..c8bc55aade3f41c4076a01ba93f02fe087d456a4 100644 (file)
@@ -1181,66 +1181,6 @@ def volume_data_get_for_project(context, project_id, volume_type_id=None):
     return _volume_data_get_for_project(context, project_id, volume_type_id)
 
 
-@require_admin_context
-def finish_volume_migration(context, src_vol_id, dest_vol_id):
-    """Swap almost all columns between dest and source.
-
-    We swap fields between source and destination at the end of migration
-    because we want to keep the original volume id in the DB but now pointing
-    to the migrated volume.
-
-    Original volume will be deleted, after this method original volume will be
-    pointed by dest_vol_id, so we set its status and migrating_status to
-    'deleting'.  We change status here to keep it in sync with migration_status
-    which must be changed here.
-
-    param src_vol_id:: ID of the migration original volume
-    param dest_vol_id: ID of the migration destination volume
-    returns: Tuple with new source and destination ORM objects.  Source will be
-             the migrated volume and destination will be original volume that
-             will be deleted.
-    """
-    session = get_session()
-    with session.begin():
-        src_volume_ref = _volume_get(context, src_vol_id, session=session,
-                                     joined_load=False)
-        src_original_data = dict(src_volume_ref.iteritems())
-        dest_volume_ref = _volume_get(context, dest_vol_id, session=session,
-                                      joined_load=False)
-
-        # NOTE(rpodolyaka): we should copy only column values, while model
-        #                   instances also have relationships attributes, which
-        #                   should be ignored
-        def is_column(inst, attr):
-            return attr in inst.__class__.__table__.columns
-
-        for key, value in dest_volume_ref.iteritems():
-            value_to_dst = src_original_data.get(key)
-            # The implementation of update_migrated_volume will decide the
-            # values for _name_id and provider_location.
-            if (key in ('id', 'provider_location')
-                    or not is_column(dest_volume_ref, key)):
-                continue
-
-            # Destination must have a _name_id since the id no longer matches
-            # the volume.  If it doesn't have a _name_id we set one.
-            elif key == '_name_id':
-                if not dest_volume_ref._name_id:
-                    setattr(dest_volume_ref, key, src_volume_ref.id)
-                continue
-            elif key == 'migration_status':
-                value = None
-                value_to_dst = 'deleting'
-            elif key == 'display_description':
-                value_to_dst = 'migration src for ' + src_volume_ref.id
-            elif key == 'status':
-                value_to_dst = 'deleting'
-
-            setattr(src_volume_ref, key, value)
-            setattr(dest_volume_ref, key, value_to_dst)
-    return src_volume_ref, dest_volume_ref
-
-
 @require_admin_context
 @_retry_on_deadlock
 def volume_destroy(context, volume_id):
index 4e5202ea7da9e4b6b3b58957a7850ab2fbdda550..d50175ef55b6d17584d2c207eddb816a086ce897 100644 (file)
@@ -55,7 +55,8 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
     # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
     #              volume_type
     # Version 1.2: Added glance_metadata, consistencygroup and snapshots
-    VERSION = '1.2'
+    # Version 1.3: Added finish_volume_migration()
+    VERSION = '1.3'
 
     OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                        'volume_type', 'volume_attachment', 'consistencygroup',
@@ -373,6 +374,41 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
         if not md_was_changed:
             self.obj_reset_changes(['metadata'])
 
+    def finish_volume_migration(self, dest_volume):
+        # We swap fields between source (i.e. self) and destination at the
+        # end of migration because we want to keep the original volume id
+        # in the DB but now pointing to the migrated volume.
+        skip = ({'id', 'provider_location', 'glance_metadata'} |
+                set(self.obj_extra_fields))
+        for key in set(dest_volume.fields.keys()) - skip:
+            # Only swap attributes that are already set.  We do not want to
+            # unexpectedly trigger a lazy-load.
+            if not dest_volume.obj_attr_is_set(key):
+                continue
+
+            value = getattr(dest_volume, key)
+            value_to_dst = getattr(self, key)
+
+            # Destination must have a _name_id since the id no longer matches
+            # the volume.  If it doesn't have a _name_id we set one.
+            if key == '_name_id':
+                if not dest_volume._name_id:
+                    setattr(dest_volume, key, self.id)
+                continue
+            elif key == 'migration_status':
+                value = None
+                value_to_dst = 'deleting'
+            elif key == 'display_description':
+                value_to_dst = 'migration src for ' + self.id
+            elif key == 'status':
+                value_to_dst = 'deleting'
+
+            setattr(self, key, value)
+            setattr(dest_volume, key, value_to_dst)
+
+        dest_volume.save()
+        return dest_volume
+
 
 @base.CinderObjectRegistry.register
 class VolumeList(base.ObjectListBase, base.CinderObject):
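
To make the swap semantics concrete, a minimal usage sketch of the new
object method (expected values mirror the unit test added further down;
variable names are illustrative):

    # src_volume keeps its id but takes over dest_volume's attributes;
    # dest_volume is marked for deletion and its _name_id points back
    # at the original id.
    updated_dest = src_volume.finish_volume_migration(dest_volume)
    assert updated_dest.status == 'deleting'
    assert updated_dest.migration_status == 'deleting'
    assert updated_dest._name_id == src_volume.id
    assert updated_dest.display_description == ('migration src for ' +
                                                src_volume.id)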
diff --git a/cinder/tests/unit/db/test_finish_migration.py b/cinder/tests/unit/db/test_finish_migration.py
deleted file mode 100644 (file)
index 6611750..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Tests for finish_volume_migration."""
-
-
-from cinder import context
-from cinder import db
-from cinder import objects
-from cinder import test
-from cinder.tests.unit import utils as testutils
-
-
-class FinishVolumeMigrationTestCase(test.TestCase):
-    """Test cases for finish_volume_migration."""
-
-    def test_finish_volume_migration_no_volume_type(self):
-        self._test_finish_volume_migration()
-
-    def test_finish_volume_migration_with_volume_type(self):
-        source_type = {'name': 'old', 'extra_specs': {}}
-        dest_type = {'name': 'new', 'extra_specs': {}}
-        self._test_finish_volume_migration(source_type=source_type,
-                                           dest_type=dest_type)
-
-    def test_finish_volume_migration_none_to_volume_type(self):
-        dest_type = {'name': 'new', 'extra_specs': {}}
-        self._test_finish_volume_migration(dest_type=dest_type)
-
-    def _test_finish_volume_migration(self, source_type=None, dest_type=None):
-        ctxt = context.RequestContext(user_id='user_id',
-                                      project_id='project_id',
-                                      is_admin=True)
-        source_type_id = None
-        dest_type_id = None
-        if source_type:
-            source_type_id = db.volume_type_create(ctxt, source_type).id
-        if dest_type:
-            dest_type_id = db.volume_type_create(ctxt, dest_type).id
-
-        src_volume = testutils.create_volume(ctxt, host='src',
-                                             migration_status='migrating',
-                                             status='available',
-                                             volume_type_id=source_type_id)
-        dest_volume = testutils.create_volume(ctxt, host='dest',
-                                              migration_status='target:fake',
-                                              status='available',
-                                              volume_type_id=dest_type_id)
-        db.finish_volume_migration(ctxt, src_volume.id, dest_volume.id)
-
-        # Check that we have copied destination volume DB data into source DB
-        # entry so we can keep the id
-        src_volume = objects.Volume.get_by_id(ctxt, src_volume.id)
-        self.assertEqual('dest', src_volume.host)
-        self.assertEqual('available', src_volume.status)
-        self.assertIsNone(src_volume.migration_status)
-        if dest_type:
-            self.assertEqual(dest_type_id, src_volume.volume_type_id)
-        else:
-            self.assertIsNone(src_volume.volume_type_id)
-
-        # Check that we have copied source volume DB data into destination DB
-        # entry and we are setting it to deleting
-        dest_volume = objects.Volume.get_by_id(ctxt, dest_volume.id)
-        self.assertEqual('src', dest_volume.host)
-        self.assertEqual('deleting', dest_volume.status)
-        self.assertEqual('deleting', dest_volume.migration_status)
-        if source_type:
-            self.assertEqual(source_type_id, dest_volume.volume_type_id)
-        else:
-            self.assertIsNone(dest_volume.volume_type_id)
index 4dc3cef7c43c7975828d273d2d82318ecd8922b3..718e7b1f217709039398727a6449b0da48c43475 100644 (file)
@@ -32,7 +32,7 @@ object_data = {
     'ServiceList': '1.0-d242d3384b68e5a5a534e090ff1d5161',
     'Snapshot': '1.0-a6c33eefeadefb324d79f72f66c54e9a',
     'SnapshotList': '1.0-71661e7180ef6cc51501704a9bea4bf1',
-    'Volume': '1.2-97c3977846dae6588381e7bd3e6e6558',
+    'Volume': '1.3-97c3977846dae6588381e7bd3e6e6558',
     'VolumeAttachment': '1.0-f14a7c03ffc5b93701d496251a5263aa',
     'VolumeAttachmentList': '1.0-307d2b6c8dd55ef854f6386898e9e98e',
     'VolumeList': '1.1-03ba6cb8c546683e64e15c50042cb1a3',
index 5a463add4bb669250bbc9e52ed867d272966901b..d9f1dc2ec537815434bf87c0367e7cf703dcbd39 100644 (file)
@@ -322,6 +322,35 @@ class TestVolume(test_objects.BaseObjectsTestCase):
         volume.volume_glance_metadata = [{'key': 'prs', 'value': 'tuw'}]
         self.assertEqual({'prs': 'tuw'}, volume.glance_metadata)
 
+    @mock.patch('cinder.db.volume_metadata_update', return_value={})
+    @mock.patch('cinder.db.volume_update')
+    def test_finish_volume_migration(self, volume_update, metadata_update):
+        src_volume_db = fake_volume.fake_db_volume(**{'id': '1'})
+        dest_volume_db = fake_volume.fake_db_volume(**{'id': '2'})
+        src_volume = objects.Volume._from_db_object(
+            self.context, objects.Volume(), src_volume_db,
+            expected_attrs=['metadata', 'glance_metadata'])
+        dest_volume = objects.Volume._from_db_object(
+            self.context, objects.Volume(), dest_volume_db,
+            expected_attrs=['metadata', 'glance_metadata'])
+        updated_dest_volume = src_volume.finish_volume_migration(
+            dest_volume)
+        self.assertEqual('deleting', updated_dest_volume.migration_status)
+        self.assertEqual('migration src for ' + src_volume.id,
+                         updated_dest_volume.display_description)
+        self.assertEqual(src_volume.id, updated_dest_volume._name_id)
+        self.assertTrue(volume_update.called)
+
+        # Ignore these attributes, since they were updated by
+        # finish_volume_migration
+        ignore_keys = ('id', 'provider_location', '_name_id',
+                       'migration_status', 'display_description', 'status')
+        dest_vol_filtered = {k: v for k, v in updated_dest_volume.iteritems()
+                             if k not in ignore_keys}
+        src_vol_filtered = {k: v for k, v in src_volume.iteritems()
+                            if k not in ignore_keys}
+        self.assertEqual(src_vol_filtered, dest_vol_filtered)
+
 
 class TestVolumeList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.volume_get_all')
index 40b9c2bd3b7e05eb71bbffbfb1ad2447efe45f49..5327e30e642e4c644017d96674871b3199a805dd 100644 (file)
@@ -1790,11 +1790,12 @@ class VolumeManager(manager.SchedulerDependentManager):
         # the current host and driver object is for the "existing" volume.
         rpcapi.update_migrated_volume(ctxt, volume, new_volume,
                                       orig_volume_status)
+        volume.refresh()
+        new_volume.refresh()
 
         # Swap src and dest DB records so we can continue using the src id and
         # asynchronously delete the destination id
-        __, updated_new = self.db.finish_volume_migration(
-            ctxt, volume.id, new_volume.id)
+        updated_new = volume.finish_volume_migration(new_volume)
         updates = {'status': orig_volume_status,
                    'previous_status': volume.status,
                    'migration_status': 'success'}
@@ -1830,7 +1831,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         if volume is None:
             # For older clients, mimic the old behavior and look up the volume
             # by its volume_id.
-            volume = objects.Volume.get_by_id(context, volume_id)
+            volume = objects.Volume.get_by_id(ctxt, volume_id)
 
         try:
             # NOTE(flaper87): Verify the driver is enabled