review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Delete volumes with snapshots
authorEric Harney <eharney@redhat.com>
Thu, 7 Jan 2016 21:25:52 +0000 (16:25 -0500)
committerEric Harney <eharney@redhat.com>
Sun, 28 Feb 2016 14:11:37 +0000 (14:11 +0000)
This adds the 'cascade' parameter to volume delete,
which deletes snapshots along with a volume in
one call.

This is done in the volume manager, and not in
a driver-optimized way, which will be a later
improvement.

Blueprint: del-vols-with-snaps
Change-Id: I33d15b76d4bd0de14c635d404b2c97096c977a58

cinder/api/v2/volumes.py
cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/tests/unit/api/contrib/test_volume_unmanage.py
cinder/tests/unit/api/v2/test_volumes.py
cinder/tests/unit/test_volume.py
cinder/tests/unit/test_volume_rpcapi.py
cinder/volume/api.py
cinder/volume/manager.py
cinder/volume/rpcapi.py

index d7aa1c0ba7baaa480c742214246399adb79c9455..4a4d16d23c0ca6ef7ebaad76068fe9a6efa215bb 100644 (file)
@@ -186,11 +186,13 @@ class VolumeController(wsgi.Controller):
         """Delete a volume."""
         context = req.environ['cinder.context']
 
+        cascade = utils.get_bool_param('cascade', req.params)
+
         LOG.info(_LI("Delete volume with id: %s"), id, context=context)
 
         try:
             volume = self.volume_api.get(context, id)
-            self.volume_api.delete(context, volume)
+            self.volume_api.delete(context, volume, cascade=cascade)
         except exception.VolumeNotFound as error:
             raise exc.HTTPNotFound(explanation=error.msg)
         return webob.Response(status_int=202)
index abde7c802420c3d61170de78865675abf28ed049..c0c7b1b3dcea168d477669fec9af59a34ca9a8b1 100644 (file)
@@ -255,6 +255,10 @@ def volume_has_snapshots_filter():
     return IMPL.volume_has_snapshots_filter()
 
 
+def volume_has_undeletable_snapshots_filter():
+    return IMPL.volume_has_undeletable_snapshots_filter()
+
+
 def volume_has_attachments_filter():
     return IMPL.volume_has_attachments_filter()
 
index 407c4999c4c8ac857b6d616c1e4f724d91550bae..d8bd2474077ccb470ae2020a475018bf61ea06a0 100644 (file)
@@ -1831,6 +1831,15 @@ def volume_has_snapshots_filter():
              ~models.Snapshot.deleted))
 
 
+def volume_has_undeletable_snapshots_filter():
+    deletable_statuses = ['available', 'error']
+    return sql.exists().where(
+        and_(models.Volume.id == models.Snapshot.volume_id,
+             ~models.Snapshot.deleted,
+             or_(models.Snapshot.cgsnapshot_id != None,  # noqa: != None
+                 models.Snapshot.status.notin_(deletable_statuses))))
+
+
 def volume_has_attachments_filter():
     return sql.exists().where(
         and_(models.Volume.id == models.VolumeAttachment.volume_id,
index eb4ec9846b70dc5d31080e6695d14d9a4cbf4342..2fdfa29ced717fbe766845d25903a92da6e099b4 100644 (file)
@@ -65,7 +65,7 @@ class VolumeUnmanageTest(test.TestCase):
         res = self._get_resp(vol.id)
         self.assertEqual(202, res.status_int, res)
 
-        mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True)
+        mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True, False)
         vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
         self.assertEqual('deleting', vol.status)
         db.volume_destroy(self.ctxt, vol.id)
index 63271508c4b514d0da0bbb0173c3e482e19b0b67..6dafffeab8bd61b510e426f3b61316bdb3b15bcd 100644 (file)
@@ -1283,7 +1283,8 @@ class VolumeApiTest(test.TestCase):
         self.assertEqual(202, resp.status_int)
 
     def test_volume_delete_attached(self):
-        def stub_volume_attached(self, context, volume, force=False):
+        def stub_volume_attached(self, context, volume,
+                                 force=False, cascade=False):
             raise exception.VolumeAttached(volume_id=volume['id'])
         self.stubs.Set(volume_api.API, "delete", stub_volume_attached)
         self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
index 2b0ca5e4958d1cead950dc74645b50fb3a0d50ce..6cf65569bc5a2c18564cf5110579af8ab0e271e5 100644 (file)
@@ -4298,6 +4298,49 @@ class VolumeTestCase(BaseVolumeTestCase):
                           None,
                           connector)
 
+    def test_cascade_delete_volume_with_snapshots(self):
+        """Test volume deletion with dependent snapshots."""
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        self.volume.create_volume(self.context, volume['id'])
+        snapshot = self._create_snapshot(volume['id'], size=volume['size'])
+        self.volume.create_snapshot(self.context, volume['id'], snapshot)
+        self.assertEqual(
+            snapshot.id, objects.Snapshot.get_by_id(self.context,
+                                                    snapshot.id).id)
+
+        volume['status'] = 'available'
+        volume['host'] = 'fakehost'
+
+        volume_api = cinder.volume.api.API()
+
+        volume_api.delete(self.context,
+                          volume,
+                          cascade=True)
+
+    def test_cascade_delete_volume_with_snapshots_error(self):
+        """Test volume deletion with dependent snapshots."""
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        self.volume.create_volume(self.context, volume['id'])
+        snapshot = self._create_snapshot(volume['id'], size=volume['size'])
+        self.volume.create_snapshot(self.context, volume['id'], snapshot)
+        self.assertEqual(
+            snapshot.id, objects.Snapshot.get_by_id(self.context,
+                                                    snapshot.id).id)
+
+        snapshot.update({'status': 'in-use'})
+        snapshot.save()
+
+        volume['status'] = 'available'
+        volume['host'] = 'fakehost'
+
+        volume_api = cinder.volume.api.API()
+
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.delete,
+                          self.context,
+                          volume,
+                          cascade=True)
+
 
 @ddt.ddt
 class VolumeMigrationTestCase(VolumeTestCase):
index aa34d7eb1a01873649e501ac7ddb7b6f83f65c87..3f923cdea48b7da3f3a59d9f5f808310ad4ec9f8 100644 (file)
@@ -291,8 +291,9 @@ class VolumeRpcAPITestCase(test.TestCase):
                               rpc_method='cast',
                               volume=self.fake_volume_obj,
                               unmanage_only=False,
-                              version='1.33')
-        can_send_version.assert_called_once_with('1.33')
+                              cascade=False,
+                              version='1.40')
+        can_send_version.assert_any_call('1.40')
 
     @mock.patch('oslo_messaging.RPCClient.can_send_version',
                 return_value=False)
@@ -302,7 +303,19 @@ class VolumeRpcAPITestCase(test.TestCase):
                               volume=self.fake_volume_obj,
                               unmanage_only=False,
                               version='1.15')
-        can_send_version.assert_called_once_with('1.33')
+        can_send_version.assert_any_call('1.33')
+
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=True)
+    def test_delete_volume_cascade(self, can_send_version):
+        self._test_volume_api('delete_volume',
+                              rpc_method='cast',
+                              volume=self.fake_volume_obj,
+                              unmanage_only=False,
+                              cascade=True,
+                              version='1.40')
+        can_send_version.assert_any_call('1.33')
+        can_send_version.assert_any_call('1.40')
 
     def test_create_snapshot(self):
         self._test_volume_api('create_snapshot',
index f6514d28cfc1859de7ea63618251bfae01070c28..3d3f9429cb29eda067c600fb95bf5c42be42c7be 100644 (file)
@@ -329,7 +329,10 @@ class API(base.Base):
             return vref
 
     @wrap_check_policy
-    def delete(self, context, volume, force=False, unmanage_only=False):
+    def delete(self, context, volume,
+               force=False,
+               unmanage_only=False,
+               cascade=False):
         if context.is_admin and context.project_id != volume.project_id:
             project_id = volume.project_id
         else:
@@ -374,8 +377,12 @@ class API(base.Base):
             expected['status'] = ('available', 'error', 'error_restoring',
                                   'error_extending')
 
-        # Volume cannot have snapshots if we want to delete it
-        filters = [~db.volume_has_snapshots_filter()]
+        if cascade:
+            # Allow deletion if all snapshots are in an expected state
+            filters = [~db.volume_has_undeletable_snapshots_filter()]
+        else:
+            # Don't allow deletion of volume with snapshots
+            filters = [~db.volume_has_snapshots_filter()]
         values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
 
         result = volume.conditional_update(values, expected, filters)
@@ -388,6 +395,22 @@ class API(base.Base):
             LOG.info(msg)
             raise exception.InvalidVolume(reason=msg)
 
+        if cascade:
+            values = {'status': 'deleting'}
+            expected = {'status': ('available', 'error', 'deleting'),
+                        'cgsnapshot_id': None}
+            snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
+                context, volume.id)
+            for s in snapshots:
+                result = s.conditional_update(values, expected, filters)
+
+                if not result:
+                    volume.update({'status': 'error_deleting'})
+                    volume.save()
+
+                    msg = _('Failed to update snapshot.')
+                    raise exception.InvalidVolume(reason=msg)
+
         cache = image_cache.ImageVolumeCache(self.db, self)
         entry = cache.get_by_image_volume(context, volume.id)
         if entry:
@@ -404,7 +427,10 @@ class API(base.Base):
                 msg = _("Unable to delete encrypted volume: %s.") % e.msg
                 raise exception.InvalidVolume(reason=msg)
 
-        self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
+        self.volume_rpcapi.delete_volume(context,
+                                         volume,
+                                         unmanage_only,
+                                         cascade)
         LOG.info(_LI("Delete volume request issued successfully."),
                  resource=volume)
 
index fdf14716e7f40acda5aa82e6698f16a1b9705d5a..3bb833000e29f80513d79c8e1b4df57e1b4d5767 100644 (file)
@@ -204,7 +204,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.39'
+    RPC_API_VERSION = '1.40'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -636,8 +636,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         return vol_ref.id
 
     @locked_volume_operation
-    def delete_volume(self, context, volume_id, unmanage_only=False,
-                      volume=None):
+    def delete_volume(self, context, volume_id,
+                      unmanage_only=False,
+                      volume=None,
+                      cascade=False):
         """Deletes and unexports volume.
 
         1. Delete a volume(normal case)
@@ -675,6 +677,13 @@ class VolumeManager(manager.SchedulerDependentManager):
             raise exception.InvalidVolume(
                 reason=_("volume is not local to this node"))
 
+        if unmanage_only and cascade:
+            # This could be done, but is ruled out for now just
+            # for simplicity.
+            raise exception.Invalid(
+                reason=_("Unmanage and cascade delete options "
+                         "are mutually exclusive."))
+
         # The status 'deleting' is not included, because it only applies to
         # the source volume to be deleted after a migration. No quota
         # needs to be handled for it.
@@ -693,6 +702,25 @@ class VolumeManager(manager.SchedulerDependentManager):
             self.driver.remove_export(context, volume)
             if unmanage_only:
                 self.driver.unmanage(volume)
+            elif cascade:
+                LOG.debug('Performing cascade delete.')
+                snapshots = objects.SnapshotList.get_all_for_volume(context,
+                                                                    volume.id)
+                for s in snapshots:
+                    if s.status != 'deleting':
+                        self._clear_db(context, is_migrating_dest, volume,
+                                       'error_deleting')
+
+                        msg = (_("Snapshot %(id)s was found in state "
+                                 "%(state)s rather than 'deleting' during "
+                                 "cascade delete.") % {'id': s.id,
+                                                       'state': s.status})
+                        raise exception.InvalidSnapshot(reason=msg)
+
+                    self.delete_snapshot(context, s)
+
+                LOG.debug('Snapshots deleted, issuing volume delete')
+                self.driver.delete_volume(volume)
             else:
                 self.driver.delete_volume(volume)
         except exception.VolumeIsBusy:
index 2a785133caee634b1b735c8bf030a5fb943ab9fe..3967f71f2d7b093216ef39d6e505655dd2ef39c7 100644 (file)
@@ -92,9 +92,10 @@ class VolumeAPI(rpc.RPCAPI):
         1.38 - Scaling backup service, add get_backup_device() and
                secure_file_operations_enabled()
         1.39 - Update replication methods to reflect new backend rep strategy
+        1.40 - Add cascade option to delete_volume().
     """
 
-    RPC_API_VERSION = '1.39'
+    RPC_API_VERSION = '1.40'
     TOPIC = CONF.volume_topic
     BINARY = 'cinder-volume'
 
@@ -152,13 +153,22 @@ class VolumeAPI(rpc.RPCAPI):
         request_spec_p = jsonutils.to_primitive(request_spec)
         cctxt.cast(ctxt, 'create_volume', **msg_args)
 
-    def delete_volume(self, ctxt, volume, unmanage_only=False):
+    def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False):
         msg_args = {'volume_id': volume.id, 'unmanage_only': unmanage_only}
+
+        version = '1.15'
+
         if self.client.can_send_version('1.33'):
             version = '1.33'
             msg_args['volume'] = volume
-        else:
-            version = '1.15'
+
+        if self.client.can_send_version('1.40'):
+            version = '1.40'
+            if cascade:
+                msg_args['cascade'] = cascade
+        elif cascade:
+            msg = _('Cascade option is not supported.')
+            raise exception.Invalid(reason=msg)
 
         cctxt = self._get_cctxt(volume.host, version)
         cctxt.cast(ctxt, 'delete_volume', **msg_args)