]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove the destination volume check in delete_volume
author: Vincent Hou <sbhou@cn.ibm.com>
Tue, 1 Sep 2015 03:17:04 +0000 (11:17 +0800)
committer: Vincent Hou <sbhou@cn.ibm.com>
Fri, 25 Sep 2015 05:39:44 +0000 (22:39 -0700)
* If the volume migration fails in the middle of the process,
the destination volume will be cleaned up via
rpcapi.delete_volume. The method delete_volume in manager
should allow us to clean up the database record for the
destination volume.

* The variable updates in migrate_volume_completion has been
assigned twice. Remove the second assignment, which is unreachable
before the return and overwrites nothing useful.

Change-Id: Icb815476943053f1487268b33c28f3575c4fd0dc
Closes-Bug: #1490445

cinder/tests/unit/test_volume.py
cinder/volume/manager.py

index 227b1b530423657fcafa747a78a8f8d3a8c59142..e060d8d9f8d400478c512d0ba546d79b0f5112e9 100644 (file)
@@ -4801,6 +4801,27 @@ class VolumeMigrationTestCase(VolumeTestCase):
         self.volume.driver._initialized = True
         self.volume.delete_volume(self.context, volume['id'])
 
+    def test_delete_source_volume_in_migration(self):
+        """Test deleting a source volume that is in migration."""
+        self._test_delete_volume_in_migration('migrating')
+
+    def test_delete_destination_volume_in_migration(self):
+        """Test deleting a destination volume that is in migration."""
+        self._test_delete_volume_in_migration('target:vol-id')
+
+    def _test_delete_volume_in_migration(self, migration_status):
+        """Test deleting a volume that is in migration."""
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        volume = db.volume_update(self.context, volume['id'],
+                                  {'status': 'available',
+                                   'migration_status': migration_status})
+        self.volume.delete_volume(self.context, volume['id'])
+
+        # The volume is successfully removed during the volume delete
+        # and won't exist in the database any more.
+        self.assertRaises(exception.VolumeNotFound, db.volume_get,
+                          self.context, volume['id'])
+
 
 class ConsistencyGroupTestCase(BaseVolumeTestCase):
     def test_delete_volume_in_consistency_group(self):
index 7a7d0cc189711f5e6efc1910b2225872a9986d2c..d5d46323674b89b5a193fcf09c27f82a0f297b08 100644 (file)
@@ -579,15 +579,9 @@ class VolumeManager(manager.SchedulerDependentManager):
         1. Delete a volume(normal case)
            Delete a volume and update quotas.
 
-        2. Delete a migration source volume
-           If deleting the source volume in a migration, we want to skip
-           quotas. Also we want to skip other database updates for source
-           volume because these update will be handled at
-           migrate_volume_completion properly.
-
-        3. Delete a migration destination volume
-           If deleting the destination volume in a migration, we want to
-           skip quotas but we need database updates for the volume.
+        2. Delete a migration volume
+           If deleting the volume in a migration, we want to skip
+           quotas but we need database updates for the volume.
       """
 
         context = context.elevated()
@@ -666,15 +660,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                 LOG.exception(_LE("Failed to update usages deleting volume."),
                               resource=volume_ref)
 
-        # If deleting the destination volume in a migration, we should skip
-        # database update here. In other cases, continue to update database
-        # entries.
-        if not is_migrating_dest:
+        # Delete glance metadata if it exists
+        self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
 
-            # Delete glance metadata if it exists
-            self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
-
-            self.db.volume_destroy(context, volume_id)
+        self.db.volume_destroy(context, volume_id)
 
         # If deleting source/destination volume in a migration, we should
         # skip quotas.
@@ -1784,9 +1773,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             LOG.error(_LE('Failed to request async delete of migration source '
                           'vol %(vol)s: %(err)s'),
                       {'vol': volume_id, 'err': ex})
-        updates = {'migration_status': 'success',
-                   'status': orig_volume_status,
-                   'previous_status': volume['status']}
 
         LOG.info(_LI("Complete-Migrate volume completed successfully."),
                  resource=volume)