"""Delete a volume."""
context = req.environ['cinder.context']
+ cascade = utils.get_bool_param('cascade', req.params)
+
LOG.info(_LI("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
- self.volume_api.delete(context, volume)
+ self.volume_api.delete(context, volume, cascade=cascade)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
return webob.Response(status_int=202)
return IMPL.volume_has_snapshots_filter()
def volume_has_undeletable_snapshots_filter():
    """Return a filter matching volumes with snapshots that block deletion.

    Thin pass-through to the configured DB backend implementation (IMPL).
    """
    return IMPL.volume_has_undeletable_snapshots_filter()
+
+
def volume_has_attachments_filter():
    """Return a filter matching volumes that have attachments.

    Thin pass-through to the configured DB backend implementation (IMPL).
    """
    return IMPL.volume_has_attachments_filter()
~models.Snapshot.deleted))
def volume_has_undeletable_snapshots_filter():
    """Return an EXISTS clause matching volumes with undeletable snapshots.

    A snapshot blocks deletion of its volume when it is not already
    soft-deleted and either belongs to a cgsnapshot or is in a status
    other than 'available'/'error'.
    """
    deletable_statuses = ['available', 'error']
    return sql.exists().where(
        and_(models.Volume.id == models.Snapshot.volume_id,
             ~models.Snapshot.deleted,
             # '!= None' is deliberate: SQLAlchemy overloads the operator
             # to emit IS NOT NULL, which 'is not None' would not do.
             or_(models.Snapshot.cgsnapshot_id != None,  # noqa: != None
                 models.Snapshot.status.notin_(deletable_statuses))))
+
+
def volume_has_attachments_filter():
return sql.exists().where(
and_(models.Volume.id == models.VolumeAttachment.volume_id,
res = self._get_resp(vol.id)
self.assertEqual(202, res.status_int, res)
- mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True)
+ mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True, False)
vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
self.assertEqual('deleting', vol.status)
db.volume_destroy(self.ctxt, vol.id)
self.assertEqual(202, resp.status_int)
def test_volume_delete_attached(self):
- def stub_volume_attached(self, context, volume, force=False):
+ def stub_volume_attached(self, context, volume,
+ force=False, cascade=False):
raise exception.VolumeAttached(volume_id=volume['id'])
self.stubs.Set(volume_api.API, "delete", stub_volume_attached)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
None,
connector)
    def test_cascade_delete_volume_with_snapshots(self):
        """Test volume deletion with dependent snapshots.

        With cascade=True the API delete must succeed even though the
        volume still has a snapshot, as long as that snapshot is in a
        deletable state.
        """
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        snapshot = self._create_snapshot(volume['id'], size=volume['size'])
        self.volume.create_snapshot(self.context, volume['id'], snapshot)
        # Sanity check: the snapshot was persisted and is retrievable.
        self.assertEqual(
            snapshot.id, objects.Snapshot.get_by_id(self.context,
                                                    snapshot.id).id)

        volume['status'] = 'available'
        volume['host'] = 'fakehost'

        volume_api = cinder.volume.api.API()

        # Must not raise: the dependent snapshot is deletable.
        volume_api.delete(self.context,
                          volume,
                          cascade=True)
+
    def test_cascade_delete_volume_with_snapshots_error(self):
        """Test cascade volume deletion fails on a non-deletable snapshot."""
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        snapshot = self._create_snapshot(volume['id'], size=volume['size'])
        self.volume.create_snapshot(self.context, volume['id'], snapshot)
        # Sanity check: the snapshot was persisted and is retrievable.
        self.assertEqual(
            snapshot.id, objects.Snapshot.get_by_id(self.context,
                                                    snapshot.id).id)

        # Put the snapshot into a state cascade delete does not accept
        # ('in-use' is not among the deletable statuses).
        snapshot.update({'status': 'in-use'})
        snapshot.save()

        volume['status'] = 'available'
        volume['host'] = 'fakehost'

        volume_api = cinder.volume.api.API()

        # The cascade delete must refuse the volume while a dependent
        # snapshot is in an unexpected state.
        self.assertRaises(exception.InvalidVolume,
                          volume_api.delete,
                          self.context,
                          volume,
                          cascade=True)
+
@ddt.ddt
class VolumeMigrationTestCase(VolumeTestCase):
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
- version='1.33')
- can_send_version.assert_called_once_with('1.33')
+ cascade=False,
+ version='1.40')
+ can_send_version.assert_any_call('1.40')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.15')
- can_send_version.assert_called_once_with('1.33')
+ can_send_version.assert_any_call('1.33')
+
    @mock.patch('oslo_messaging.RPCClient.can_send_version',
                return_value=True)
    def test_delete_volume_cascade(self, can_send_version):
        """delete_volume with cascade=True casts with the 1.40 signature."""
        self._test_volume_api('delete_volume',
                              rpc_method='cast',
                              volume=self.fake_volume_obj,
                              unmanage_only=False,
                              cascade=True,
                              version='1.40')
        # The client probes both capability levels before settling on 1.40.
        can_send_version.assert_any_call('1.33')
        can_send_version.assert_any_call('1.40')
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
return vref
@wrap_check_policy
- def delete(self, context, volume, force=False, unmanage_only=False):
+ def delete(self, context, volume,
+ force=False,
+ unmanage_only=False,
+ cascade=False):
if context.is_admin and context.project_id != volume.project_id:
project_id = volume.project_id
else:
expected['status'] = ('available', 'error', 'error_restoring',
'error_extending')
- # Volume cannot have snapshots if we want to delete it
- filters = [~db.volume_has_snapshots_filter()]
+ if cascade:
+ # Allow deletion if all snapshots are in an expected state
+ filters = [~db.volume_has_undeletable_snapshots_filter()]
+ else:
+ # Don't allow deletion of volume with snapshots
+ filters = [~db.volume_has_snapshots_filter()]
values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
result = volume.conditional_update(values, expected, filters)
LOG.info(msg)
raise exception.InvalidVolume(reason=msg)
+ if cascade:
+ values = {'status': 'deleting'}
+ expected = {'status': ('available', 'error', 'deleting'),
+ 'cgsnapshot_id': None}
+ snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
+ context, volume.id)
+ for s in snapshots:
+ result = s.conditional_update(values, expected, filters)
+
+ if not result:
+ volume.update({'status': 'error_deleting'})
+ volume.save()
+
+ msg = _('Failed to update snapshot.')
+ raise exception.InvalidVolume(reason=msg)
+
cache = image_cache.ImageVolumeCache(self.db, self)
entry = cache.get_by_image_volume(context, volume.id)
if entry:
msg = _("Unable to delete encrypted volume: %s.") % e.msg
raise exception.InvalidVolume(reason=msg)
- self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
+ self.volume_rpcapi.delete_volume(context,
+ volume,
+ unmanage_only,
+ cascade)
LOG.info(_LI("Delete volume request issued successfully."),
resource=volume)
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
- RPC_API_VERSION = '1.39'
+ RPC_API_VERSION = '1.40'
target = messaging.Target(version=RPC_API_VERSION)
return vol_ref.id
@locked_volume_operation
- def delete_volume(self, context, volume_id, unmanage_only=False,
- volume=None):
+ def delete_volume(self, context, volume_id,
+ unmanage_only=False,
+ volume=None,
+ cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
+ if unmanage_only and cascade:
+ # This could be done, but is ruled out for now just
+ # for simplicity.
+ raise exception.Invalid(
+ reason=_("Unmanage and cascade delete options "
+ "are mutually exclusive."))
+
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
+ elif cascade:
+ LOG.debug('Performing cascade delete.')
+ snapshots = objects.SnapshotList.get_all_for_volume(context,
+ volume.id)
+ for s in snapshots:
+ if s.status != 'deleting':
+ self._clear_db(context, is_migrating_dest, volume,
+ 'error_deleting')
+
+ msg = (_("Snapshot %(id)s was found in state "
+ "%(state)s rather than 'deleting' during "
+ "cascade delete.") % {'id': s.id,
+ 'state': s.status})
+ raise exception.InvalidSnapshot(reason=msg)
+
+ self.delete_snapshot(context, s)
+
+ LOG.debug('Snapshots deleted, issuing volume delete')
+ self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
1.38 - Scaling backup service, add get_backup_device() and
secure_file_operations_enabled()
1.39 - Update replication methods to reflect new backend rep strategy
+ 1.40 - Add cascade option to delete_volume().
"""
- RPC_API_VERSION = '1.39'
+ RPC_API_VERSION = '1.40'
TOPIC = CONF.volume_topic
BINARY = 'cinder-volume'
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt.cast(ctxt, 'create_volume', **msg_args)
- def delete_volume(self, ctxt, volume, unmanage_only=False):
+ def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False):
msg_args = {'volume_id': volume.id, 'unmanage_only': unmanage_only}
+
+ version = '1.15'
+
if self.client.can_send_version('1.33'):
version = '1.33'
msg_args['volume'] = volume
- else:
- version = '1.15'
+
+ if self.client.can_send_version('1.40'):
+ version = '1.40'
+ if cascade:
+ msg_args['cascade'] = cascade
+ elif cascade:
+ msg = _('Cascade option is not supported.')
+ raise exception.Invalid(reason=msg)
cctxt = self._get_cctxt(volume.host, version)
cctxt.cast(ctxt, 'delete_volume', **msg_args)