From 764c5ec749821d36bb0215dc6002d3caea95d3b1 Mon Sep 17 00:00:00 2001
From: Xing Yang
Date: Fri, 5 Sep 2014 17:48:00 -0400
Subject: [PATCH] Delete consistency group failed

Deleting a CG failed because delete_consistencygroup in
volume/manager.py compares host@backend with host@backend#pool. The
fix is to call extract_host before doing the comparison.

Another issue is deleting a CG that has no host. This throws an
exception when extract_host(group['host']) is called in
delete_consistencygroup in volume/rpcapi.py. The solution is to check
the host field in consistencygroup/api.py and, if it is empty, delete
the group from the database there.

Change-Id: Ife28a4f91bd8a4e123c74dac9748ee31cc7b7306
Closes-Bug: #1366205
---
 cinder/consistencygroup/api.py                 | 28 +++++--
 .../api/contrib/test_consistencygroups.py      | 59 ++++++++++++--
 cinder/tests/test_volume.py                    | 78 +++++++++++++++++++
 cinder/volume/manager.py                       |  6 +-
 4 files changed, 157 insertions(+), 14 deletions(-)

diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py
index b1ae8e47f..bda01e4fa 100644
--- a/cinder/consistencygroup/api.py
+++ b/cinder/consistencygroup/api.py
@@ -156,7 +156,7 @@ class API(base.Base):
             filter_properties_list.append(filter_properties)
 
         # Update quota for consistencygroups
-        self.update_quota(context, group['id'])
+        self.update_quota(context, group['id'], 1)
 
         self._cast_create_consistencygroup(context, group['id'],
                                            request_spec_list,
@@ -219,21 +219,35 @@ class API(base.Base):
                 request_spec_list=request_spec_list,
                 filter_properties_list=filter_properties_list)
 
-    def update_quota(self, context, group_id):
-        reserve_opts = {'consistencygroups': 1}
+    def update_quota(self, context, group_id, num, project_id=None):
+        reserve_opts = {'consistencygroups': num}
         try:
-            reservations = CGQUOTAS.reserve(context, **reserve_opts)
-            CGQUOTAS.commit(context, reservations)
+            reservations = CGQUOTAS.reserve(context,
+                                            project_id=project_id,
+                                            **reserve_opts)
+            if reservations:
+                CGQUOTAS.commit(context, reservations)
         except Exception:
             with excutils.save_and_reraise_exception():
                 try:
-                    self.db.consistencygroup_destroy(context, group_id)
+                    self.db.consistencygroup_destroy(context.elevated(),
+                                                     group_id)
                 finally:
-                    LOG.error(_("Failed to update quota for creating"
+                    LOG.error(_("Failed to update quota for "
                                 "consistency group %s."), group_id)
 
     @wrap_check_policy
     def delete(self, context, group, force=False):
+        if not group['host']:
+            self.update_quota(context, group['id'], -1, group['project_id'])
+
+            msg = ("No host for consistency group %s. Deleting from "
+                   "the database.") % group['id']
+            LOG.debug(msg)
+            self.db.consistencygroup_destroy(context.elevated(), group['id'])
+
+            return
+
         if not force and group['status'] not in ["available", "error"]:
             msg = _("Consistency group status must be available or error, "
                     "but current status is: %s") % group['status']
diff --git a/cinder/tests/api/contrib/test_consistencygroups.py b/cinder/tests/api/contrib/test_consistencygroups.py
index 3600457bc..18a183c0b 100644
--- a/cinder/tests/api/contrib/test_consistencygroups.py
+++ b/cinder/tests/api/contrib/test_consistencygroups.py
@@ -20,13 +20,14 @@ Tests for consistency group code.
 import json
 from xml.dom import minidom
 
+import mock
 import webob
 
+import cinder.consistencygroup
 from cinder import context
 from cinder import db
 from cinder import test
 from cinder.tests.api import fakes
-import cinder.volume
 
 
 class ConsistencyGroupsAPITestCase(test.TestCase):
@@ -34,10 +35,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
 
     def setUp(self):
         super(ConsistencyGroupsAPITestCase, self).setUp()
-        self.volume_api = cinder.volume.API()
-        self.context = context.get_admin_context()
-        self.context.project_id = 'fake'
-        self.context.user_id = 'fake'
+        self.cg_api = cinder.consistencygroup.API()
 
     @staticmethod
     def _create_consistencygroup(
@@ -45,6 +43,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
             description='this is a test consistency group',
             volume_type_id='123456',
             availability_zone='az1',
+            host='fakehost',
             status='creating'):
         """Create a consistency group object."""
         consistencygroup = {}
@@ -54,8 +53,8 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
         consistencygroup['name'] = name
         consistencygroup['description'] = description
         consistencygroup['volume_type_id'] = volume_type_id
+        consistencygroup['host'] = host
         consistencygroup['status'] = status
-        consistencygroup['host'] = 'fakehost'
         return db.consistencygroup_create(
             context.get_admin_context(),
             consistencygroup)['id']
@@ -378,3 +377,51 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
 
         db.consistencygroup_destroy(context.get_admin_context(),
                                     consistencygroup_id)
+
+    def test_delete_consistencygroup_no_host(self):
+        consistencygroup_id = self._create_consistencygroup(
+            host=None,
+            status='error')
+        req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
+                                  consistencygroup_id)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        body = {"consistencygroup": {"force": True}}
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 202)
+
+        cg = db.consistencygroup_get(
+            context.get_admin_context(read_deleted='yes'),
+            consistencygroup_id)
+        self.assertEqual(cg['status'], 'deleted')
+        self.assertEqual(cg['host'], None)
+
+    def test_create_delete_consistencygroup_update_quota(self):
+        ctxt = context.RequestContext('fake', 'fake', auth_token=True)
+        name = 'mycg'
+        description = 'consistency group 1'
+        fake_type = {'id': '1', 'name': 'fake_type'}
+        self.stubs.Set(db, 'volume_types_get_by_name_or_id',
+                       mock.Mock(return_value=[fake_type]))
+        self.stubs.Set(self.cg_api,
+                       '_cast_create_consistencygroup',
+                       mock.Mock())
+        self.stubs.Set(self.cg_api, 'update_quota',
+                       mock.Mock())
+
+        cg = self.cg_api.create(ctxt, name, description, fake_type['name'])
+        self.cg_api.update_quota.assert_called_once_with(
+            ctxt, cg['id'], 1)
+        self.assertEqual(cg['status'], 'creating')
+        self.assertEqual(cg['host'], None)
+        self.cg_api.update_quota.reset_mock()
+
+        cg['status'] = 'error'
+        self.cg_api.delete(ctxt, cg)
+        self.cg_api.update_quota.assert_called_once_with(
+            ctxt, cg['id'], -1, ctxt.project_id)
+        cg = db.consistencygroup_get(
+            context.get_admin_context(read_deleted='yes'),
+            cg['id'])
+        self.assertEqual(cg['status'], 'deleted')
diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index 712b3c369..ca57c7bbe 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -3104,8 +3104,86 @@ class VolumeTestCase(BaseVolumeTestCase):
                           db.cgsnapshot_get,
                           self.context,
                           cgsnapshot_id)
+
         self.volume.delete_consistencygroup(self.context, group_id)
 
+    def test_delete_consistencygroup_correct_host(self):
+        """Test consistencygroup can be deleted.
+
+        Test consistencygroup can be deleted when volumes are on
+        the correct volume node.
+        """
+
+        rval = {'status': 'available'}
+        driver.VolumeDriver.create_consistencygroup = \
+            mock.Mock(return_value=rval)
+
+        rval = {'status': 'deleted'}, []
+        driver.VolumeDriver.delete_consistencygroup = \
+            mock.Mock(return_value=rval)
+
+        group = tests_utils.create_consistencygroup(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            volume_type='type1,type2')
+
+        group_id = group['id']
+        volume = tests_utils.create_volume(
+            self.context,
+            consistencygroup_id=group_id,
+            host='host1@backend1#pool1',
+            status='creating',
+            size=1)
+        self.volume.host = 'host1@backend1'
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+
+        self.volume.delete_consistencygroup(self.context, group_id)
+        cg = db.consistencygroup_get(
+            context.get_admin_context(read_deleted='yes'),
+            group_id)
+        self.assertEqual(cg['status'], 'deleted')
+        self.assertRaises(exception.NotFound,
+                          db.consistencygroup_get,
+                          self.context,
+                          group_id)
+
+    def test_delete_consistencygroup_wrong_host(self):
+        """Test consistencygroup cannot be deleted.
+
+        Test consistencygroup cannot be deleted when volumes in the
+        group are not local to the volume node.
+        """
+
+        rval = {'status': 'available'}
+        driver.VolumeDriver.create_consistencygroup = \
+            mock.Mock(return_value=rval)
+
+        group = tests_utils.create_consistencygroup(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            volume_type='type1,type2')
+
+        group_id = group['id']
+        volume = tests_utils.create_volume(
+            self.context,
+            consistencygroup_id=group_id,
+            host='host1@backend1#pool1',
+            status='creating',
+            size=1)
+        self.volume.host = 'host1@backend2'
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume.delete_consistencygroup,
+                          self.context,
+                          group_id)
+        cg = db.consistencygroup_get(self.context,
+                                     group_id)
+        # Group is not deleted
+        self.assertEqual(cg['status'], 'available')
+
 
 class CopyVolumeToImageTestCase(BaseVolumeTestCase):
     def fake_local_path(self, volume):
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 2c5ce6a90..3874edd62 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -1667,7 +1667,11 @@ class VolumeManager(manager.SchedulerDependentManager):
             if volume_ref['attach_status'] == "attached":
                 # Volume is still attached, need to detach first
                 raise exception.VolumeAttached(volume_id=volume_ref['id'])
-            if volume_ref['host'] != self.host:
+            # self.host is 'host@backend'.
+            # volume_ref['host'] is 'host@backend#pool'.
+            # Extract the host before doing the comparison.
+            new_host = vol_utils.extract_host(volume_ref['host'])
+            if new_host != self.host:
                 raise exception.InvalidVolume(
                     reason=_("Volume is not local to this node"))
 
-- 
2.45.2
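
Note (illustration only, not part of the patch to apply): the manager-side fix
works because vol_utils.extract_host() reduces a pool-qualified volume host
such as 'host1@backend1#pool1' to the backend-level string 'host1@backend1'
that the volume manager keeps in self.host. The sketch below shows that
comparison with a simplified stand-in for the real helper in
cinder/volume/utils.py (which also accepts 'host' and 'pool' levels and a
default pool name):

    def extract_host(host, level='backend'):
        # 'host1@backend1#pool1' -> 'host1'          at the 'host' level
        # 'host1@backend1#pool1' -> 'host1@backend1' at the 'backend' level
        if level == 'host':
            return host.split('@')[0]
        return host.split('#')[0]

    manager_host = 'host1@backend1'        # what VolumeManager stores in self.host
    volume_host = 'host1@backend1#pool1'   # volume_ref['host'] once pools are in use

    # Old check: direct string comparison wrongly treats a local volume as remote.
    assert volume_host != manager_host
    # Patched check: compare at the backend level, so the hosts match.
    assert extract_host(volume_host) == manager_host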