]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix issue of volume after host-assisted migration
author: peter_wang <peter.wang13@emc.com>
Fri, 21 Aug 2015 05:33:44 +0000 (01:33 -0400)
committer: peter_wang <peter.wang13@emc.com>
Wed, 16 Sep 2015 16:12:43 +0000 (12:12 -0400)
In host-assisted migration, Cinder framework currently doesn't
update name_id and provider_location when the driver doesn't provide
name_id or provider_location in update_migrated_volume

This is a change from the previous Cinder behavior, which would update
name_id and provider_location regardless. Any operation against such a
volume will not be successful; for example, volume deletion cannot
delete its underlying LUN.

This fix will update name_id and provider_location if the driver doesn't
return them.

Closes-Bug: #1487952
Change-Id: Ice72fc792f0ea8909c027e6df99cff9bbeef5e48

cinder/tests/unit/test_volume.py
cinder/volume/manager.py

index 3087358a22c623d2fece1712a0d801b3b50f27fb..cf3d65d25006d1a868f3fc1151429263eabd56a7 100644 (file)
@@ -4427,8 +4427,8 @@ class VolumeMigrationTestCase(VolumeTestCase):
             self.volume.update_migrated_volume(self.context, volume,
                                                new_volume, 'available')
             volume_update.assert_has_calls((
-                mock.call(fake_elevated, volume['id'], fake_update),
-                mock.call(fake_elevated, new_volume['id'], expected_update)))
+                mock.call(fake_elevated, new_volume['id'], expected_update),
+                mock.call(fake_elevated, volume['id'], fake_update)))
 
             # Test the case for update_migrated_volume not implemented
             # for the driver.
@@ -4438,8 +4438,8 @@ class VolumeMigrationTestCase(VolumeTestCase):
             self.volume.update_migrated_volume(self.context, volume,
                                                new_volume, 'available')
             volume_update.assert_has_calls((
-                mock.call(fake_elevated, volume['id'], fake_update_error),
-                mock.call(fake_elevated, new_volume['id'], expected_update)))
+                mock.call(fake_elevated, new_volume['id'], expected_update),
+                mock.call(fake_elevated, volume['id'], fake_update_error)))
 
     def test_migrate_volume_generic_create_volume_error(self):
         self.expected_status = 'error'
index bb00aa800953bef5f7cca2d3d752a110358e73f5..7a7d0cc189711f5e6efc1910b2225872a9986d2c 100644 (file)
@@ -250,10 +250,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                 max_cache_size,
                 max_cache_entries
             )
-            LOG.info(_LI('Image-volume cache enabled for host %(host)s'),
+            LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
                      {'host': self.host})
         else:
-            LOG.info(_LI('Image-volume cache disabled for host %(host)s'),
+            LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
                      {'host': self.host})
             self.image_volume_cache = None
 
@@ -1059,7 +1059,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 image_meta
             )
         except exception.CinderException as e:
-            LOG.warning(_LW('Failed to create new image-volume cache entry'
+            LOG.warning(_LW('Failed to create new image-volume cache entry.'
                             ' Error: %(exception)s'), {'exception': e})
             if image_volume:
                 self.delete_volume(ctx, image_volume.id)
@@ -3048,6 +3048,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         # This is temporary fix for bug 1491210.
         volume = self.db.volume_get(ctxt, volume['id'])
         new_volume = self.db.volume_get(ctxt, new_volume['id'])
+        model_update_default = {'_name_id': new_volume['_name_id'] or
+                                new_volume['id'],
+                                'provider_location':
+                                new_volume['provider_location']}
         try:
             model_update = self.driver.update_migrated_volume(ctxt,
                                                               volume,
@@ -3057,19 +3061,32 @@ class VolumeManager(manager.SchedulerDependentManager):
             # If update_migrated_volume is not implemented for the driver,
             # _name_id and provider_location will be set with the values
             # from new_volume.
-            model_update = {'_name_id': new_volume['_name_id'] or
-                            new_volume['id'],
-                            'provider_location':
-                            new_volume['provider_location']}
+            model_update = model_update_default
         if model_update:
-            self.db.volume_update(ctxt.elevated(), volume['id'],
-                                  model_update)
+            model_update_default.update(model_update)
             # Swap keys that were changed in the source so we keep their values
             # in the temporary volume's DB record.
-            model_update_new = {key: volume[key]
-                                for key in model_update.iterkeys()}
+            # Need to convert 'metadata' and 'admin_metadata' since
+            # they are not keys of volume, their corresponding keys are
+            # 'volume_metadata' and 'volume_admin_metadata'.
+            model_update_new = dict()
+            for key in model_update:
+                if key == 'metadata':
+                    if volume.get('volume_metadata'):
+                        model_update_new[key] = {
+                            metadata['key']: metadata['value']
+                            for metadata in volume.get('volume_metadata')}
+                elif key == 'admin_metadata':
+                        model_update_new[key] = {
+                            metadata['key']: metadata['value']
+                            for metadata in volume.get(
+                                'volume_admin_metadata')}
+                else:
+                    model_update_new[key] = volume[key]
             self.db.volume_update(ctxt.elevated(), new_volume['id'],
                                   model_update_new)
+        self.db.volume_update(ctxt.elevated(), volume['id'],
+                              model_update_default)
 
     # Replication V2 methods
     def enable_replication(self, context, volume):