]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fixed shared gigabytes quota resource.
authorJohn Griffith <john.griffith@solidfire.com>
Sun, 24 Mar 2013 20:49:46 +0000 (20:49 +0000)
committerJohn Griffith <john.griffith@solidfire.com>
Sun, 24 Mar 2013 20:52:03 +0000 (20:52 +0000)
The shared gigabytes resource between volumes and snapshots wasn't
working properly.  The issue was that on update/sync the action item
(volumes or snapshots) would update the resource usages based only on
its own particular item.

This patch fixes that, and makes the total gigabytes truly shared
between volumes and snapshots.

Fixes bug: 1159489

Change-Id: Ib1be9788f0beb4f94d010e4f816f9f3393371205

cinder/quota.py
cinder/volume/api.py

index 34196c2df5b505168d1078c48ab9c8190ff3a72c..1a1edc92b670269520e454506d5a96c7f26736f2 100644 (file)
@@ -749,7 +749,8 @@ QUOTAS = QuotaEngine()
 resources = [
     ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
     ReservableResource('snapshots', _sync_snapshots, 'quota_snapshots'),
-    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), ]
+    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
+    ReservableResource('gigabytes', _sync_snapshots, 'quota_gigabytes'), ]
 
 
 QUOTAS.register_resources(resources)
index 911664bf8f8de198df6e6d453e2721aedc7957c4..5b3cccd4b7739bedff5b45ba69b8c228fc7ce73d 100644 (file)
@@ -154,19 +154,21 @@ class API(base.Base):
             def _consumed(name):
                 return (usages[name]['reserved'] + usages[name]['in_use'])
 
-            pid = context.project_id
             if 'gigabytes' in overs:
-                consumed = _consumed('gigabytes')
-                quota = quotas['gigabytes']
-                LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
-                           "%(size)sG volume (%(consumed)dG of %(quota)dG "
-                           "already consumed)") % locals())
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
+                        "already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                's_size': size,
+                                'd_consumed': _consumed('gigabytes'),
+                                'd_quota': quotas['gigabytes']})
                 raise exception.VolumeSizeExceedsAvailableQuota()
             elif 'volumes' in overs:
-                consumed = _consumed('volumes')
-                LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
-                           "volume (%(consumed)d volumes "
-                           "already consumed)") % locals())
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "volume (%(d_consumed)d volumes"
+                        "already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                'd_consumed': _consumed('volumes')})
                 raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
 
         if availability_zone is None:
@@ -238,15 +240,15 @@ class API(base.Base):
             volume_ref = self.db.volume_update(context, volume_id, values)
 
             # bypass scheduler and send request directly to volume
-            self.volume_rpcapi.create_volume(context,
-                                             volume_ref,
-                                             volume_ref['host'],
-                                             request_spec=request_spec,
-                                             filter_properties=
-                                             filter_properties,
-                                             allow_reschedule=False,
-                                             snapshot_id=snapshot_id,
-                                             image_id=image_id)
+            self.volume_rpcapi.create_volume(
+                context,
+                volume_ref,
+                volume_ref['host'],
+                request_spec=request_spec,
+                filter_properties=filter_properties,
+                allow_reschedule=False,
+                snapshot_id=snapshot_id,
+                image_id=image_id)
         elif source_volid:
             source_volume_ref = self.db.volume_get(context,
                                                    source_volid)
@@ -255,25 +257,25 @@ class API(base.Base):
             volume_ref = self.db.volume_update(context, volume_id, values)
 
             # bypass scheduler and send request directly to volume
-            self.volume_rpcapi.create_volume(context,
-                                             volume_ref,
-                                             volume_ref['host'],
-                                             request_spec=request_spec,
-                                             filter_properties=
-                                             filter_properties,
-                                             allow_reschedule=False,
-                                             snapshot_id=snapshot_id,
-                                             image_id=image_id,
-                                             source_volid=source_volid)
+            self.volume_rpcapi.create_volume(
+                context,
+                volume_ref,
+                volume_ref['host'],
+                request_spec=request_spec,
+                filter_properties=filter_properties,
+                allow_reschedule=False,
+                snapshot_id=snapshot_id,
+                image_id=image_id,
+                source_volid=source_volid)
         else:
-            self.scheduler_rpcapi.create_volume(context,
-                                                FLAGS.volume_topic,
-                                                volume_id,
-                                                snapshot_id,
-                                                image_id,
-                                                request_spec=request_spec,
-                                                filter_properties=
-                                                filter_properties)
+            self.scheduler_rpcapi.create_volume(
+                context,
+                FLAGS.volume_topic,
+                volume_id,
+                snapshot_id,
+                image_id,
+                request_spec=request_spec,
+                filter_properties=filter_properties)
 
     @wrap_check_policy
     def delete(self, context, volume, force=False):
@@ -514,19 +516,22 @@ class API(base.Base):
             def _consumed(name):
                 return (usages[name]['reserved'] + usages[name]['in_use'])
 
-            pid = context.project_id
             if 'gigabytes' in overs:
-                consumed = _consumed('gigabytes')
-                quota = quotas['gigabytes']
-                LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
-                           "%(size)sG volume (%(consumed)dG of %(quota)dG "
-                           "already consumed)") % locals())
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "%(s_size)sG snapshot (%(d_consumed)dG of "
+                        "%(d_quota)dG already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                's_size': volume['size'],
+                                'd_consumed': _consumed('gigabytes'),
+                                'd_quota': quotas['gigabytes']})
                 raise exception.VolumeSizeExceedsAvailableQuota()
             elif 'snapshots' in overs:
-                consumed = _consumed('snapshots')
-                LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
-                           "snapshot (%(consumed)d snapshots "
-                           "already consumed)") % locals())
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "snapshot (%(d_consumed)d snapshots "
+                        "already consumed)")
+
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                'd_consumed': _consumed('snapshots')})
                 raise exception.SnapshotLimitExceeded(
                     allowed=quotas['snapshots'])