From: peter_wang
Date: Fri, 21 Aug 2015 05:33:44 +0000 (-0400)
Subject: Fix issue of volume after host-assisted migration
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=20ac9cac7284f009929cfd6e5ce79f34bccffce8;p=openstack-build%2Fcinder-build.git

Fix issue of volume after host-assisted migration

In a host-assisted migration, the Cinder framework currently does not
update name_id and provider_location when the driver does not return
them from update_migrated_volume. This is a change from the previous
Cinder behavior, which updated name_id and provider_location in any
case. As a result, subsequent operations against the migrated volume
fail; for example, volume deletion cannot delete its underlying LUN.

This fix updates name_id and provider_location with the values from
the new volume when the driver does not return them.

Closes-Bug: #1487952
Change-Id: Ice72fc792f0ea8909c027e6df99cff9bbeef5e48
---

diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py
index 3087358a2..cf3d65d25 100644
--- a/cinder/tests/unit/test_volume.py
+++ b/cinder/tests/unit/test_volume.py
@@ -4427,8 +4427,8 @@ class VolumeMigrationTestCase(VolumeTestCase):
         self.volume.update_migrated_volume(self.context, volume,
                                            new_volume, 'available')
         volume_update.assert_has_calls((
-            mock.call(fake_elevated, volume['id'], fake_update),
-            mock.call(fake_elevated, new_volume['id'], expected_update)))
+            mock.call(fake_elevated, new_volume['id'], expected_update),
+            mock.call(fake_elevated, volume['id'], fake_update)))

         # Test the case for update_migrated_volume not implemented
         # for the driver.
@@ -4438,8 +4438,8 @@ class VolumeMigrationTestCase(VolumeTestCase):
         self.volume.update_migrated_volume(self.context, volume,
                                            new_volume, 'available')
         volume_update.assert_has_calls((
-            mock.call(fake_elevated, volume['id'], fake_update_error),
-            mock.call(fake_elevated, new_volume['id'], expected_update)))
+            mock.call(fake_elevated, new_volume['id'], expected_update),
+            mock.call(fake_elevated, volume['id'], fake_update_error)))

     def test_migrate_volume_generic_create_volume_error(self):
         self.expected_status = 'error'
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index bb00aa800..7a7d0cc18 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -250,10 +250,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                 max_cache_size,
                 max_cache_entries
             )
-            LOG.info(_LI('Image-volume cache enabled for host %(host)s'),
+            LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
                     {'host': self.host})
         else:
-            LOG.info(_LI('Image-volume cache disabled for host %(host)s'),
+            LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
                     {'host': self.host})
            self.image_volume_cache = None
@@ -1059,7 +1059,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                     image_meta
                 )
             except exception.CinderException as e:
-                LOG.warning(_LW('Failed to create new image-volume cache entry'
+                LOG.warning(_LW('Failed to create new image-volume cache entry.'
                                 ' Error: %(exception)s'),
                             {'exception': e})
                 if image_volume:
                     self.delete_volume(ctx, image_volume.id)
@@ -3048,6 +3048,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         # This is temporary fix for bug 1491210.
         volume = self.db.volume_get(ctxt, volume['id'])
         new_volume = self.db.volume_get(ctxt, new_volume['id'])
+        model_update_default = {'_name_id': new_volume['_name_id'] or
+                                new_volume['id'],
+                                'provider_location':
+                                new_volume['provider_location']}
         try:
             model_update = self.driver.update_migrated_volume(ctxt,
                                                                volume,
@@ -3057,19 +3061,32 @@ class VolumeManager(manager.SchedulerDependentManager):
             # If update_migrated_volume is not implemented for the driver,
             # _name_id and provider_location will be set with the values
             # from new_volume.
-            model_update = {'_name_id': new_volume['_name_id'] or
-                            new_volume['id'],
-                            'provider_location':
-                            new_volume['provider_location']}
+            model_update = model_update_default
         if model_update:
-            self.db.volume_update(ctxt.elevated(), volume['id'],
-                                  model_update)
+            model_update_default.update(model_update)
             # Swap keys that were changed in the source so we keep their values
             # in the temporary volume's DB record.
-            model_update_new = {key: volume[key]
-                                for key in model_update.iterkeys()}
+            # Need to convert 'metadata' and 'admin_metadata' since
+            # they are not keys of volume, their corresponding keys are
+            # 'volume_metadata' and 'volume_admin_metadata'.
+            model_update_new = dict()
+            for key in model_update:
+                if key == 'metadata':
+                    if volume.get('volume_metadata'):
+                        model_update_new[key] = {
+                            metadata['key']: metadata['value']
+                            for metadata in volume.get('volume_metadata')}
+                elif key == 'admin_metadata':
+                    model_update_new[key] = {
+                        metadata['key']: metadata['value']
+                        for metadata in volume.get(
+                            'volume_admin_metadata')}
+                else:
+                    model_update_new[key] = volume[key]
             self.db.volume_update(ctxt.elevated(), new_volume['id'],
                                   model_update_new)
+        self.db.volume_update(ctxt.elevated(), volume['id'],
+                              model_update_default)

     # Replication V2 methods
     def enable_replication(self, context, volume):
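
Note (not part of the commit): a minimal stand-alone sketch of the
fallback/merge behaviour this change gives update_migrated_volume.
The helper name merge_model_update and the sample volume values are
hypothetical; only the default/override logic mirrors the patch.

    # Hypothetical, simplified illustration: driver-supplied keys win,
    # while _name_id and provider_location fall back to the new
    # volume's values when the driver omits them.
    def merge_model_update(new_volume, driver_update):
        model_update_default = {
            '_name_id': new_volume['_name_id'] or new_volume['id'],
            'provider_location': new_volume['provider_location'],
        }
        if driver_update:
            # Keys returned by the driver override the defaults;
            # anything the driver omits keeps the fallback value.
            model_update_default.update(driver_update)
        return model_update_default

    # Example: the driver returns only provider_location, so _name_id
    # still falls back to the new volume's id instead of being left stale.
    new_volume = {'id': 'new-vol-uuid', '_name_id': None,
                  'provider_location': 'system^APM00123|type^lun|id^7'}
    print(merge_model_update(
        new_volume, {'provider_location': 'system^APM00123|type^lun|id^8'}))
    # {'_name_id': 'new-vol-uuid',
    #  'provider_location': 'system^APM00123|type^lun|id^8'}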