from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI
from cinder import volume
LOG = logging.getLogger(__name__)
self.volume_api.delete(context, vol, unmanage_only=True)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
- except exception.VolumeAttached:
- msg = _("Volume cannot be deleted while in attached state")
- raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
self.volume_api.delete(context, volume)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
- except exception.VolumeAttached:
- msg = _("Volume cannot be deleted while in attached state")
- raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
return IMPL.volume_update_status_based_on_attachment(context, volume_id)
+def volume_has_snapshots_filter():
+ return IMPL.volume_has_snapshots_filter()
+
+
def volume_has_attachments_filter():
return IMPL.volume_has_attachments_filter()
return volume_ref
+def volume_has_snapshots_filter():
+ return sql.exists().where(
+ and_(models.Volume.id == models.Snapshot.volume_id,
+ ~models.Snapshot.deleted))
+
+
def volume_has_attachments_filter():
return sql.exists().where(
and_(models.Volume.id == models.VolumeAttachment.volume_id,
res = req.get_response(app())
return res
+ @mock.patch('cinder.db.conditional_update', return_value=1)
@mock.patch('cinder.db.snapshot_update')
- @mock.patch('cinder.objects.volume.Volume.get_by_id')
- @mock.patch('cinder.db.volume_get')
+ @mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot')
- def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_db,
- mock_volume_get_by_id, mock_db_update):
+ def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_volume_get_by_id,
+ mock_db_update, mock_conditional_update):
"""Return success for valid and unattached volume."""
ctxt = context.RequestContext('admin', 'fake', True)
volume = fake_volume.fake_volume_obj(ctxt, id='fake_volume_id')
mock_volume_get_by_id.return_value = volume
res = self._get_resp(snapshot_id)
- self.assertEqual(1, mock_db.call_count)
- self.assertEqual(2, len(mock_db.call_args[0]), mock_db.call_args)
- self.assertEqual('fake_volume_id', mock_db.call_args[0][1])
+ self.assertEqual(1, mock_volume_get_by_id.call_count)
+ self.assertEqual(2, len(mock_volume_get_by_id.call_args[0]),
+ mock_volume_get_by_id.call_args)
+ self.assertEqual('fake_volume_id',
+ mock_volume_get_by_id.call_args[0][1])
self.assertEqual(1, mock_rpcapi.call_count)
self.assertEqual(3, len(mock_rpcapi.call_args[0]))
import webob
from cinder import context
-from cinder import exception
+from cinder import db
+from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
-from cinder.tests.unit import fake_snapshot
-from cinder.tests.unit import fake_volume
+from cinder.tests.unit import utils
-# This list of fake volumes is used by our tests. Each is configured in a
-# slightly different way, and includes only the properties that are required
-# for these particular tests to function correctly.
-snapshot_vol_id = 'ffffffff-0000-ffff-0000-fffffffffffd'
-detached_vol_id = 'ffffffff-0000-ffff-0000-fffffffffffe'
-attached_vol_id = 'ffffffff-0000-ffff-0000-ffffffffffff'
-bad_vol_id = 'ffffffff-0000-ffff-0000-fffffffffff0'
-
-vols = {snapshot_vol_id: {'id': snapshot_vol_id,
- 'status': 'available',
- 'attach_status': 'detached',
- 'host': 'fake_host',
- 'project_id': 'fake_project',
- 'migration_status': None,
- 'consistencygroup_id': None,
- 'encryption_key_id': None},
- detached_vol_id: {'id': detached_vol_id,
- 'status': 'available',
- 'attach_status': 'detached',
- 'host': 'fake_host',
- 'project_id': 'fake_project',
- 'migration_status': None,
- 'consistencygroup_id': None,
- 'encryption_key_id': None},
- attached_vol_id: {'id': attached_vol_id,
- 'status': 'available',
- 'attach_status': 'attached',
- 'host': 'fake_host',
- 'project_id': 'fake_project',
- 'migration_status': None,
- 'consistencygroup_id': None,
- 'encryption_key_id': None}
- }
-
-
-def app():
- # no auth, just let environ['cinder.context'] pass through
- api = fakes.router.APIRouter()
- mapper = fakes.urlmap.URLMap()
- mapper['/v2'] = api
- return mapper
-
-
-def api_get(self, context, volume_id):
- """Replacement for cinder.volume.api.API.get.
-
- We stub the cinder.volume.api.API.get method to check for the existence
- of volume_id in our list of fake volumes and raise an exception if the
- specified volume ID is not in our list.
- """
- vol = vols.get(volume_id, None)
-
- if not vol:
- raise exception.VolumeNotFound(volume_id)
-
- return fake_volume.fake_volume_obj(context, **vol)
-
-
-def db_snapshot_get_all_for_volume(context, volume_id):
- """Replacement for cinder.db.snapshot_get_all_for_volume.
-
- We stub the cinder.db.snapshot_get_all_for_volume method because when we
- go to unmanage a volume, the code checks for snapshots and won't unmanage
- volumes with snapshots. For these tests, only the snapshot_vol_id reports
- any snapshots. The delete code just checks for array length, doesn't
- inspect the contents.
- """
- if volume_id == snapshot_vol_id:
- db_snapshot = {'volume_id': volume_id}
- snapshot = fake_snapshot.fake_db_snapshot(**db_snapshot)
- return [snapshot]
- return []
-
-
-@mock.patch('cinder.volume.api.API.get', api_get)
-@mock.patch('cinder.db.snapshot_get_all_for_volume',
- db_snapshot_get_all_for_volume)
class VolumeUnmanageTest(test.TestCase):
"""Test cases for cinder/api/contrib/volume_unmanage.py
def setUp(self):
super(VolumeUnmanageTest, self).setUp()
+ self.ctxt = context.RequestContext('admin', 'fake_project', True)
+
+ api = fakes.router.APIRouter()
+ self.app = fakes.urlmap.URLMap()
+ self.app['/v2'] = api
def _get_resp(self, volume_id):
"""Helper to build an os-unmanage req for the specified volume_id."""
- req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
+ req = webob.Request.blank('/v2/%s/volumes/%s/action' %
+ (self.ctxt.project_id, volume_id))
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
- req.environ['cinder.context'] = context.RequestContext('admin',
- 'fake',
- True)
+ req.environ['cinder.context'] = self.ctxt
body = {'os-unmanage': ''}
req.body = jsonutils.dump_as_bytes(body)
- res = req.get_response(app())
+ res = req.get_response(self.app)
return res
- @mock.patch('cinder.db.volume_update')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_volume')
- def test_unmanage_volume_ok(self, mock_rpcapi, mock_db):
+ def test_unmanage_volume_ok(self, mock_rpcapi):
"""Return success for valid and unattached volume."""
- res = self._get_resp(detached_vol_id)
-
- # volume_update is (context, id, new_data)
- self.assertEqual(1, mock_db.call_count)
- self.assertEqual(3, len(mock_db.call_args[0]), mock_db.call_args)
- self.assertEqual(detached_vol_id, mock_db.call_args[0][1])
-
- # delete_volume is (context, status, unmanageOnly)
- self.assertEqual(1, mock_rpcapi.call_count)
- self.assertEqual(3, len(mock_rpcapi.call_args[0]))
- self.assertTrue(mock_rpcapi.call_args[0][2])
-
+ vol = utils.create_volume(self.ctxt)
+ res = self._get_resp(vol.id)
self.assertEqual(202, res.status_int, res)
+ mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True)
+ vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
+ self.assertEqual('deleting', vol.status)
+ db.volume_destroy(self.ctxt, vol.id)
+
def test_unmanage_volume_bad_volume_id(self):
"""Return 404 if the volume does not exist."""
- res = self._get_resp(bad_vol_id)
+ res = self._get_resp('nonexistent-volume-id')
self.assertEqual(404, res.status_int, res)
- def test_unmanage_volume_attached_(self):
+ def test_unmanage_volume_attached(self):
"""Return 400 if the volume exists but is attached."""
- res = self._get_resp(attached_vol_id)
+ vol = utils.create_volume(self.ctxt, status='in-use',
+ attach_status='attached')
+ res = self._get_resp(vol.id)
self.assertEqual(400, res.status_int, res)
+ db.volume_destroy(self.ctxt, vol.id)
- @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
- def test_unmanage_volume_with_snapshots(self, metadata_get):
+ def test_unmanage_volume_with_snapshots(self):
"""Return 400 if the volume exists but has snapshots."""
- res = self._get_resp(snapshot_vol_id)
+ vol = utils.create_volume(self.ctxt)
+ snap = utils.create_snapshot(self.ctxt, vol.id)
+ res = self._get_resp(vol.id)
self.assertEqual(400, res.status_int, res)
+ db.volume_destroy(self.ctxt, vol.id)
+ db.snapshot_destroy(self.ctxt, snap.id)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
- exp = self.assertRaises(webob.exc.HTTPBadRequest,
+ exp = self.assertRaises(exception.VolumeAttached,
self.controller.delete,
req, 1)
- expect_msg = "Volume cannot be deleted while in attached state"
+ expect_msg = "Volume 1 is still attached, detach volume first."
self.assertEqual(expect_msg, six.text_type(exp))
def test_volume_delete_no_volume(self):
'pools': {}}
# keep ordered record of what we execute
self.called = []
+ self.volume_api = cinder.volume.api.API()
def _cleanup(self):
try:
@mock.patch.object(keymgr, 'API', new=fake_keymgr.fake_api)
def test_create_delete_volume_with_encrypted_volume_type(self):
- ctxt = context.get_admin_context()
-
- db.volume_type_create(ctxt,
- {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
- 'name': 'LUKS'})
+ db_vol_type = db.volume_type_create(self.context,
+ {'id': 'type-id', 'name': 'LUKS'})
db.volume_type_encryption_create(
- ctxt,
- '61298380-0c12-11e3-bfd6-4b48424183be',
+ self.context, 'type-id',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
- volume_api = cinder.volume.api.API()
-
- db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
-
- volume = volume_api.create(self.context,
- 1,
- 'name',
- 'description',
- volume_type=db_vol_type)
+ volume = self.volume_api.create(self.context,
+ 1,
+ 'name',
+ 'description',
+ volume_type=db_vol_type)
self.assertIsNotNone(volume.get('encryption_key_id', None))
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
volume['host'] = 'fake_host'
volume['status'] = 'available'
- volume_api.delete(self.context, volume)
+ db.volume_update(self.context, volume['id'], {'status': 'available'})
+ self.volume_api.delete(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('deleting', volume['status'])
pass
@staticmethod
- def _create_snapshot(volume_id, size=1, metadata=None):
+ def _create_snapshot(volume_id, size=1, metadata=None, ctxt=None,
+ **kwargs):
"""Create a snapshot object."""
metadata = metadata or {}
- snap = objects.Snapshot(context.get_admin_context())
+ snap = objects.Snapshot(ctxt or context.get_admin_context())
snap.volume_size = size
snap.user_id = 'fake'
snap.project_id = 'fake'
snap.status = "creating"
if metadata is not None:
snap.metadata = metadata
+ snap.update(kwargs)
+
snap.create()
return snap
def _test_cannot_delete_volume(self, status):
"""Test volume can't be deleted in invalid stats."""
# create a volume and assign to host
- volume = tests_utils.create_volume(self.context, **self.volume_params)
- self.volume.create_volume(self.context, volume['id'])
- volume['status'] = status
- volume['host'] = 'fakehost'
-
- volume_api = cinder.volume.api.API()
+ volume = tests_utils.create_volume(self.context, CONF.host,
+ status=status)
# 'in-use' status raises InvalidVolume
self.assertRaises(exception.InvalidVolume,
- volume_api.delete,
+ self.volume_api.delete,
self.context,
volume)
def test_force_delete_volume(self):
"""Test volume can be forced to delete."""
# create a volume and assign to host
+ self.volume_params['status'] = 'error_deleting'
volume = tests_utils.create_volume(self.context, **self.volume_params)
- self.volume.create_volume(self.context, volume['id'])
- volume['status'] = 'error_deleting'
-
- volume_api = cinder.volume.api.API()
# 'error_deleting' volumes can't be deleted
self.assertRaises(exception.InvalidVolume,
- volume_api.delete,
+ self.volume_api.delete,
self.context,
volume)
# delete with force
- volume_api.delete(self.context, volume, force=True)
+ self.volume_api.delete(self.context, volume, force=True)
# status is deleting
volume = objects.Volume.get_by_id(context.get_admin_context(),
def test_cannot_force_delete_attached_volume(self):
"""Test volume can't be force delete in attached state."""
- volume = tests_utils.create_volume(self.context, **self.volume_params)
- self.volume.create_volume(self.context, volume['id'])
- volume['status'] = 'in-use'
- volume['attach_status'] = 'attached'
- volume['host'] = 'fakehost'
+ volume = tests_utils.create_volume(self.context, CONF.host,
+ status='in-use',
+ attach_status='attached')
- volume_api = cinder.volume.api.API()
-
- self.assertRaises(exception.VolumeAttached,
- volume_api.delete,
+ self.assertRaises(exception.InvalidVolume,
+ self.volume_api.delete,
self.context,
volume,
force=True)
- self.volume.delete_volume(self.context, volume['id'])
+ db.volume_destroy(self.context, volume.id)
def test_cannot_delete_volume_with_snapshots(self):
"""Test volume can't be deleted with dependent snapshots."""
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
- volume = tests_utils.create_volume(self.context, **self.volume_params)
- self.volume.create_volume(self.context, volume['id'])
- snapshot = self._create_snapshot(volume['id'], size=volume['size'])
- self.volume.create_snapshot(self.context, volume['id'], snapshot)
-
- volume_api = cinder.volume.api.API()
-
- snapshot.status = 'badstatus'
+ volume = tests_utils.create_volume(self.context, CONF.host)
+ snapshot = self._create_snapshot(volume.id, size=volume['size'],
+ ctxt=self.context, status='bad')
self.assertRaises(exception.InvalidSnapshot,
- volume_api.delete_snapshot,
+ self.volume_api.delete_snapshot,
self.context,
snapshot)
snapshot.status = 'error'
- self.volume.delete_snapshot(self.context, snapshot)
- self.volume.delete_volume(self.context, volume['id'])
+ snapshot.save()
+ self.volume_api.delete_snapshot(self.context, snapshot)
+
+ self.assertEqual('deleting', snapshot.status)
+ self.volume.delete_volume(self.context, volume.id)
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
class ConsistencyGroupTestCase(BaseVolumeTestCase):
def test_delete_volume_in_consistency_group(self):
"""Test deleting a volume that's tied to a consistency group fails."""
- volume_api = cinder.volume.api.API()
- volume = tests_utils.create_volume(self.context, **self.volume_params)
consistencygroup_id = '12345678-1234-5678-1234-567812345678'
- volume = db.volume_update(self.context, volume['id'],
- {'status': 'available',
+ volume_api = cinder.volume.api.API()
+ self.volume_params.update({'status': 'available',
'consistencygroup_id': consistencygroup_id})
+ volume = tests_utils.create_volume(self.context, **self.volume_params)
self.assertRaises(exception.InvalidVolume,
volume_api.delete, self.context, volume)
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
- if context.is_admin and context.project_id != volume['project_id']:
- project_id = volume['project_id']
+ if context.is_admin and context.project_id != volume.project_id:
+ project_id = volume.project_id
else:
project_id = context.project_id
- volume_id = volume['id']
- if not volume['host']:
+ if not volume.host:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
- reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
+ reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
- volume['volume_type_id'])
+ volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."),
resource={'type': 'volume',
- 'id': volume_id})
+ 'id': volume.id})
return
- if volume['attach_status'] == "attached":
- # Volume is still attached, need to detach first
- LOG.info(_LI('Unable to delete volume: %s, '
- 'volume is attached.'), volume['id'])
- raise exception.VolumeAttached(volume_id=volume_id)
-
- if not force and volume['status'] not in ["available", "error",
- "error_restoring",
- "error_extending"]:
- msg = _("Volume status must be available or error, "
- "but current status is: %s.") % volume['status']
- LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
- 'volume must be available or '
- 'error, but is %(vol_status)s.'),
- {'vol_id': volume['id'],
- 'vol_status': volume['status']})
- raise exception.InvalidVolume(reason=msg)
- if self._is_volume_migrating(volume):
- # Volume is migrating, wait until done
- LOG.info(_LI('Unable to delete volume: %s, '
- 'volume is currently migrating.'), volume['id'])
- msg = _("Volume cannot be deleted while migrating")
- raise exception.InvalidVolume(reason=msg)
+ # Build required conditions for conditional update
+ expected = {'attach_status': db.Not('attached'),
+ 'migration_status': self.AVAILABLE_MIGRATION_STATUS,
+ 'consistencygroup_id': None}
- if volume['consistencygroup_id'] is not None:
- msg = _("Volume cannot be deleted while in a consistency group.")
- LOG.info(_LI('Unable to delete volume: %s, '
- 'volume is currently part of a '
- 'consistency group.'), volume['id'])
- raise exception.InvalidVolume(reason=msg)
+ # If not force deleting we have status conditions
+ if not force:
+ expected['status'] = ('available', 'error', 'error_restoring',
+ 'error_extending')
+
+ # Volume cannot have snapshots if we want to delete it
+ filters = [~db.volume_has_snapshots_filter()]
+ values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
+
+ result = volume.conditional_update(values, expected, filters)
- snapshots = objects.SnapshotList.get_all_for_volume(context,
- volume_id)
- if len(snapshots):
- LOG.info(_LI('Unable to delete volume: %s, '
- 'volume currently has snapshots.'), volume['id'])
- msg = _("Volume still has %d dependent "
- "snapshots.") % len(snapshots)
+ if not result:
+ status = utils.build_or_str(expected.get('status'),
+ _(' status must be %s and '))
+ msg = _('Volume%s must not be migrating, attached, belong to a '
+ 'consistency group or have snapshots.') % status
+ LOG.info(msg)
raise exception.InvalidVolume(reason=msg)
cache = image_cache.ImageVolumeCache(self.db, self)
- entry = cache.get_by_image_volume(context, volume_id)
+ entry = cache.get_by_image_volume(context, volume.id)
if entry:
cache.evict(context, entry)
msg = _("Unable to delete encrypted volume: %s.") % e.msg
raise exception.InvalidVolume(reason=msg)
- volume.status = 'deleting'
- volume.terminated_at = timeutils.utcnow()
- volume.save()
-
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
LOG.info(_LI("Delete volume request issued successfully."),
resource=volume)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False,
unmanage_only=False):
- if not force and snapshot.status not in ["available", "error"]:
- LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
- 'due to invalid status. '
- 'Status must be available or '
- 'error, not %(snap_status)s.'),
- {'snap_id': snapshot.id,
- 'snap_status': snapshot.status})
- msg = _("Volume Snapshot status must be available or error.")
- raise exception.InvalidSnapshot(reason=msg)
- cgsnapshot_id = snapshot.cgsnapshot_id
- if cgsnapshot_id:
- msg = _('Unable to delete snapshot %s because it is part of a '
- 'consistency group.') % snapshot.id
+ # Build required conditions for conditional update
+ expected = {'cgsnapshot_id': None}
+ # If not force deleting we have status conditions
+ if not force:
+ expected['status'] = ('available', 'error')
+
+ result = snapshot.conditional_update({'status': 'deleting'}, expected)
+ if not result:
+ status = utils.build_or_str(expected.get('status'),
+ _(' status must be %s and '))
+ msg = (_('Snapshot%s must not be part of a consistency group.') %
+ status)
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
- snapshot_obj = self.get_snapshot(context, snapshot.id)
- snapshot_obj.status = 'deleting'
- snapshot_obj.save()
-
- volume = self.db.volume_get(context, snapshot_obj.volume_id)
- self.volume_rpcapi.delete_snapshot(context, snapshot_obj,
- volume['host'],
+ # Make RPC call to the right host
+ volume = objects.Volume.get_by_id(context, snapshot.volume_id)
+ self.volume_rpcapi.delete_snapshot(context, snapshot, volume.host,
unmanage_only=unmanage_only)
LOG.info(_LI("Snapshot delete request issued successfully."),
resource=snapshot)