From 5ffed5d2f71215de963c5a54e4f8a0ba48a05803 Mon Sep 17 00:00:00 2001
From: john-griffith
Date: Thu, 21 Mar 2013 09:48:03 -0600
Subject: [PATCH] Snapshot reservation sync calls wrong resource.

The snapshot reservations code isn't calling the correct
resource on sync (it's calling volumes).

There are also some problems with the logic used in the
delete/clean-up path that are fixed here as well.

Fixes bug: 1157506
Fixes bug: 1157982

Change-Id: I91327b8043ab63aa35ea8a91b6de544bf5bf6c61
(cherry picked from commit b450eef832ff471b78776c9715b0878e95d69263)
---
 cinder/db/api.py              |  2 +-
 cinder/flags.py               |  5 ++++-
 cinder/quota.py               |  6 +++---
 cinder/tests/test_quota.py    | 11 ++++++-----
 cinder/volume/api.py          |  7 +++++--
 cinder/volume/manager.py      | 16 ++++++++++++++++
 etc/cinder/cinder.conf.sample |  4 +++-
 7 files changed, 38 insertions(+), 13 deletions(-)

diff --git a/cinder/db/api.py b/cinder/db/api.py
index 4f3b92544..60346ae23 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -314,7 +314,7 @@ def snapshot_data_get_for_project(context, project_id, session=None):
     """Get count and gigabytes used for snapshots for specified project."""
     return IMPL.snapshot_data_get_for_project(context,
                                               project_id,
-                                              session=None)
+                                              session)


 ####################
diff --git a/cinder/flags.py b/cinder/flags.py
index bfe766181..d82e1ca8a 100644
--- a/cinder/flags.py
+++ b/cinder/flags.py
@@ -238,6 +238,9 @@ global_opts = [
                 default=None,
                 help='A list of backend names to use. These backend names '
                      'should be backed by a unique [CONFIG] group '
-                     'with its options'), ]
+                     'with its options'),
+    cfg.BoolOpt('no_snapshot_gb_quota',
+                default=False,
+                help='Whether snapshots count against GigaByte quota'), ]

 FLAGS.register_opts(global_opts)
diff --git a/cinder/quota.py b/cinder/quota.py
index 2dd218752..34196c2df 100644
--- a/cinder/quota.py
+++ b/cinder/quota.py
@@ -738,9 +738,9 @@ def _sync_volumes(context, project_id, session):

 def _sync_snapshots(context, project_id, session):
     return dict(zip(('snapshots', 'gigabytes'),
-                db.volume_data_get_for_project(context,
-                                               project_id,
-                                               session=session)))
+                db.snapshot_data_get_for_project(context,
+                                                 project_id,
+                                                 session=session)))


 QUOTAS = QuotaEngine()
diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py
index f6c81837b..20b956335 100644
--- a/cinder/tests/test_quota.py
+++ b/cinder/tests/test_quota.py
@@ -68,13 +68,14 @@ class QuotaIntegrationTestCase(test.TestCase):
         vol['user_id'] = self.user_id
         vol['project_id'] = self.project_id
         vol['size'] = size
-        return db.volume_create(self.context, vol)['id']
+        vol['status'] = 'available'
+        return db.volume_create(self.context, vol)

     def test_too_many_volumes(self):
         volume_ids = []
         for i in range(FLAGS.quota_volumes):
-            volume_id = self._create_volume()
-            volume_ids.append(volume_id)
+            vol_ref = self._create_volume()
+            volume_ids.append(vol_ref['id'])
         self.assertRaises(exception.QuotaError,
                           volume.API().create,
                           self.context, 10, '', '', None)
@@ -83,8 +84,8 @@ class QuotaIntegrationTestCase(test.TestCase):

     def test_too_many_gigabytes(self):
         volume_ids = []
-        volume_id = self._create_volume(size=20)
-        volume_ids.append(volume_id)
+        vol_ref = self._create_volume(size=20)
+        volume_ids.append(vol_ref['id'])
         self.assertRaises(exception.QuotaError,
                           volume.API().create,
                           self.context, 10, '', '', None)
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index a6fb584b1..8c3d394b0 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -491,8 +491,11 @@ class API(base.Base):
             raise exception.InvalidVolume(reason=msg)

         try:
-            reservations = QUOTAS.reserve(context, snapshots=1,
-                                          gigabytes=volume['size'])
+            if FLAGS.no_snapshot_gb_quota:
+                reservations = QUOTAS.reserve(context, snapshots=1)
+            else:
+                reservations = QUOTAS.reserve(context, snapshots=1,
+                                              gigabytes=volume['size'])
         except exception.OverQuota as e:
             overs = e.kwargs['overs']
             usages = e.kwargs['usages']
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index c317d4e65..86b7d6841 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -489,9 +489,25 @@ class VolumeManager(manager.SchedulerDependentManager):
                                         snapshot_ref['id'],
                                         {'status': 'error_deleting'})

+        # Get reservations
+        try:
+            if CONF.no_snapshot_gb_quota:
+                reservations = QUOTAS.reserve(context, snapshots=-1)
+            else:
+                reservations = QUOTAS.reserve(
+                    context,
+                    snapshots=-1,
+                    gigabytes=-snapshot_ref['volume_size'])
+        except Exception:
+            reservations = None
+            LOG.exception(_("Failed to update usages deleting snapshot"))
         self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
         self.db.snapshot_destroy(context, snapshot_id)
         LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
+
+        # Commit the reservations
+        if reservations:
+            QUOTAS.commit(context, reservations)
         return True

     def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample
index 2036f7025..4092bd288 100644
--- a/etc/cinder/cinder.conf.sample
+++ b/etc/cinder/cinder.conf.sample
@@ -44,6 +44,8 @@
 # syslog facility to receive log lines (string value)
 #syslog_log_facility=LOG_USER

+# Do not count snapshots against gigabytes quota (bool value)
+#no_snapshot_gb_quota=False

 #
 # Options defined in cinder.exception
@@ -1168,4 +1170,4 @@

 #volume_driver=cinder.volume.driver.FakeISCSIDriver

-# Total option count: 254
+# Total option count: 255
-- 
2.45.2
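
For reviewers who want to trace the new reservation flow outside of a running Cinder tree, below is a minimal, self-contained sketch of the pattern this patch introduces. FakeQuotaEngine and the two helper functions are illustrative stand-ins, not part of the patch or of cinder.quota (the real QUOTAS.reserve returns a list of reservation ids rather than an int); only the reserve/commit ordering and the no_snapshot_gb_quota branching mirror the changes above.

# Sketch only: a stand-in quota engine illustrating the reserve/commit flow
# used by the patch. FakeQuotaEngine and both helpers are hypothetical.


class FakeQuotaEngine(object):
    """Records reservations the way the sketch's callers expect."""

    def __init__(self):
        self.usage = {'snapshots': 0, 'gigabytes': 0}
        self._pending = {}
        self._next_id = 0

    def reserve(self, context, **deltas):
        # The real engine returns a list of reservation UUIDs; an int id
        # is enough to show the flow here.
        self._next_id += 1
        self._pending[self._next_id] = deltas
        return self._next_id

    def commit(self, context, reservations):
        for resource, delta in self._pending.pop(reservations).items():
            self.usage[resource] += delta


def reserve_for_snapshot_create(quotas, context, volume, no_snapshot_gb_quota):
    # Mirrors the cinder/volume/api.py change: skip the gigabytes delta
    # when the no_snapshot_gb_quota option is enabled.
    if no_snapshot_gb_quota:
        return quotas.reserve(context, snapshots=1)
    return quotas.reserve(context, snapshots=1, gigabytes=volume['size'])


def reserve_for_snapshot_delete(quotas, context, snapshot, no_snapshot_gb_quota):
    # Mirrors the cinder/volume/manager.py change: negative deltas on delete,
    # committed only after the snapshot record is destroyed; a failure to
    # reserve is logged and skipped rather than blocking the delete.
    try:
        if no_snapshot_gb_quota:
            return quotas.reserve(context, snapshots=-1)
        return quotas.reserve(context, snapshots=-1,
                              gigabytes=-snapshot['volume_size'])
    except Exception:
        return None


if __name__ == '__main__':
    quotas = FakeQuotaEngine()
    ctxt = object()

    # Create a snapshot of a 10 GB volume, then delete it: usage returns to zero.
    res = reserve_for_snapshot_create(quotas, ctxt, {'size': 10},
                                      no_snapshot_gb_quota=False)
    quotas.commit(ctxt, res)

    res = reserve_for_snapshot_delete(quotas, ctxt, {'volume_size': 10},
                                      no_snapshot_gb_quota=False)
    if res:
        quotas.commit(ctxt, res)
    print(quotas.usage)  # {'snapshots': 0, 'gigabytes': 0}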