review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Snapshot reservation sync calls wrong resource.
author: john-griffith <john.griffith@solidfire.com>
Thu, 21 Mar 2013 15:48:03 +0000 (09:48 -0600)
committer: john-griffith <john.griffith@solidfire.com>
Sat, 23 Mar 2013 21:34:30 +0000 (15:34 -0600)
The snapshot reservations code isn't calling the
correct resource on sync (it's calling volumes).  There
are also some problems with the logic being used on the
delete/clean-up that are fixed here as well.

Fixes bug: 1157506
Fixes bug: 1157982

Change-Id: I91327b8043ab63aa35ea8a91b6de544bf5bf6c61
(cherry picked from commit b450eef832ff471b78776c9715b0878e95d69263)

cinder/db/api.py
cinder/flags.py
cinder/quota.py
cinder/tests/test_quota.py
cinder/volume/api.py
cinder/volume/manager.py
etc/cinder/cinder.conf.sample

index 4f3b92544ab9edf57ad76419d7760e8857aa0fac..60346ae239e1c8245d5293bd2533b6b2e971113b 100644 (file)
@@ -314,7 +314,7 @@ def snapshot_data_get_for_project(context, project_id, session=None):
     """Get count and gigabytes used for snapshots for specified project."""
     return IMPL.snapshot_data_get_for_project(context,
                                               project_id,
-                                              session=None)
+                                              session)
 
 
 ####################
index bfe7661819d2884b11f5ce6f654d99a126e86a2e..d82e1ca8a508107898d8c8dc4910873b75863bf9 100644 (file)
@@ -238,6 +238,9 @@ global_opts = [
                 default=None,
                 help='A list of backend names to use. These backend names '
                      'should be backed by a unique [CONFIG] group '
-                     'with its options'), ]
+                     'with its options'),
+    cfg.BoolOpt('no_snapshot_gb_quota',
+                default=False,
+                help='Whether snapshots count against GigaByte quota'), ]
 
 FLAGS.register_opts(global_opts)
index 2dd21875227b54ad8a53bfba8681eaef6e9c64d2..34196c2df5b505168d1078c48ab9c8190ff3a72c 100644 (file)
@@ -738,9 +738,9 @@ def _sync_volumes(context, project_id, session):
 
 def _sync_snapshots(context, project_id, session):
     return dict(zip(('snapshots', 'gigabytes'),
-                db.volume_data_get_for_project(context,
-                                               project_id,
-                                               session=session)))
+                db.snapshot_data_get_for_project(context,
+                                                 project_id,
+                                                 session=session)))
 
 
 QUOTAS = QuotaEngine()
index f6c81837b57c5502f272ce36cec7ba8174992c78..20b956335f85cda9ff859c61a1f81068aec20b0b 100644 (file)
@@ -68,13 +68,14 @@ class QuotaIntegrationTestCase(test.TestCase):
         vol['user_id'] = self.user_id
         vol['project_id'] = self.project_id
         vol['size'] = size
-        return db.volume_create(self.context, vol)['id']
+        vol['status'] = 'available'
+        return db.volume_create(self.context, vol)
 
     def test_too_many_volumes(self):
         volume_ids = []
         for i in range(FLAGS.quota_volumes):
-            volume_id = self._create_volume()
-            volume_ids.append(volume_id)
+            vol_ref = self._create_volume()
+            volume_ids.append(vol_ref['id'])
         self.assertRaises(exception.QuotaError,
                           volume.API().create,
                           self.context, 10, '', '', None)
@@ -83,8 +84,8 @@ class QuotaIntegrationTestCase(test.TestCase):
 
     def test_too_many_gigabytes(self):
         volume_ids = []
-        volume_id = self._create_volume(size=20)
-        volume_ids.append(volume_id)
+        vol_ref = self._create_volume(size=20)
+        volume_ids.append(vol_ref['id'])
         self.assertRaises(exception.QuotaError,
                           volume.API().create,
                           self.context, 10, '', '', None)
index a6fb584b18d005c631381dd7414ff2eefe873288..8c3d394b0468f88edaf59542fc396722411d5c8d 100644 (file)
@@ -491,8 +491,11 @@ class API(base.Base):
             raise exception.InvalidVolume(reason=msg)
 
         try:
-            reservations = QUOTAS.reserve(context, snapshots=1,
-                                          gigabytes=volume['size'])
+            if FLAGS.no_snapshot_gb_quota:
+                reservations = QUOTAS.reserve(context, snapshots=1)
+            else:
+                reservations = QUOTAS.reserve(context, snapshots=1,
+                                              gigabytes=volume['size'])
         except exception.OverQuota as e:
             overs = e.kwargs['overs']
             usages = e.kwargs['usages']
index c317d4e65179323d5a1dd12a6a8832eb953fa0c2..86b7d68415b8044bd75ea5f79394bf8d155d9f96 100644 (file)
@@ -489,9 +489,25 @@ class VolumeManager(manager.SchedulerDependentManager):
                                         snapshot_ref['id'],
                                         {'status': 'error_deleting'})
 
+        # Get reservations
+        try:
+            if CONF.no_snapshot_gb_quota:
+                reservations = QUOTAS.reserve(context, snapshots=-1)
+            else:
+                reservations = QUOTAS.reserve(
+                    context,
+                    snapshots=-1,
+                    gigabytes=-snapshot_ref['volume_size'])
+        except Exception:
+            reservations = None
+            LOG.exception(_("Failed to update usages deleting snapshot"))
         self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
         self.db.snapshot_destroy(context, snapshot_id)
         LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
+
+        # Commit the reservations
+        if reservations:
+            QUOTAS.commit(context, reservations)
         return True
 
     def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
index 2036f7025ab69140a6ac06493606d7210b230e3f..4092bd288e115accc8a70c5b83b13c43ef7fd581 100644 (file)
@@ -44,6 +44,8 @@
 # syslog facility to receive log lines (string value)
 #syslog_log_facility=LOG_USER
 
+# Do not count snapshots against gigabytes quota (bool value)
+#no_snapshot_gb_quota=False
 
 #
 # Options defined in cinder.exception
 #volume_driver=cinder.volume.driver.FakeISCSIDriver
 
 
-# Total option count: 254
+# Total option count: 255