review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add config option to set max_volume_size_limit
author Rakesh Mishra <mishra.rakesh510@gmail.com>
Tue, 26 May 2015 20:54:20 +0000 (02:24 +0530)
committer Rakesh Mishra <mishra.rakesh510@gmail.com>
Tue, 16 Jun 2015 18:38:34 +0000 (00:08 +0530)
There is a need to limit the maximum size of a volume to levels
that the storage infrastructure can handle.
Setting a maximum limit on the size of a volume also prevents
a tenant from creating large volumes that have not been tested
and certified to satisfy SLA objectives.

This feature allows an admin to set a volume size limit for a tenant.
The default value for the volume size limit is obtained via config:
it comes either from the default set in the quota configuration
option or from cinder.conf, if the user has configured default
values for quotas there.

per_volume_size_limit always defaults to -1 ("no limit")
unless changed in cinder.conf by the admin.

Change-Id: Ieb5c087ca7a33d22342470ea790a0c979a6244ea
Implements: blueprint cinder-quota-define-per-volume
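
For illustration, a minimal sketch of the intended semantics (commentary only, not
part of the patch): a negative limit means "no limit", while any non-negative value
caps the size of a single volume, independently of the tenant's total gigabytes quota.

    # Hedged sketch of the per-volume limit semantics; not Cinder code.
    def exceeds_per_volume_limit(size_gb, limit_gb):
        # limit_gb < 0 (the default -1) means no per-volume cap.
        return limit_gb >= 0 and size_gb > limit_gb

    assert not exceeds_per_volume_limit(500, -1)  # default: unlimited
    assert exceeds_per_volume_limit(100, 50)      # 100 GB request over a 50 GB cap
    assert not exceeds_per_volume_limit(50, 50)   # a request at the cap is allowed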

cinder/db/sqlalchemy/migrate_repo/versions/047_add_per_volume_quota.py [new file with mode: 0644]
cinder/exception.py
cinder/quota.py
cinder/tests/unit/api/contrib/test_quotas.py
cinder/tests/unit/api/contrib/test_quotas_classes.py
cinder/tests/unit/test_quota.py
cinder/volume/flows/api/create_volume.py

diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_add_per_volume_quota.py b/cinder/db/sqlalchemy/migrate_repo/versions/047_add_per_volume_quota.py
new file mode 100644
index 0000000..a4a0e87
--- /dev/null
@@ -0,0 +1,62 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+from oslo_utils import timeutils
+from sqlalchemy import MetaData, Table
+
+from cinder.i18n import _LE
+
+# Get the default value via config. The default will either
+# come from the default value set in the quota configuration option
+# or from cinder.conf if the user has configured a
+# default value for the per-volume size limit there.
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    """Add default "per_volume_gigabytes" row into DB."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+    quota_classes = Table('quota_classes', meta, autoload=True)
+    row = quota_classes.count().\
+        where(quota_classes.c.resource == 'per_volume_gigabytes').\
+        execute().scalar()
+
+    # Do not add an entry if a 'per_volume_gigabytes' entry already
+    # exists in the database.
+    # We don't want to write over something the user added.
+    if row:
+        return
+
+    try:
+        # Set default per_volume_gigabytes for per volume size
+        qci = quota_classes.insert()
+        qci.execute({'created_at': timeutils.utcnow(),
+                     'class_name': 'default',
+                     'resource': 'per_volume_gigabytes',
+                     'hard_limit': -1,
+                     'deleted': False, })
+    except Exception:
+        LOG.error(_LE("Default per_volume_gigabytes row not inserted "
+                      "into the quota_classes."))
+        raise
+
+
+def downgrade(migrate_engine):
+    """Don't delete the 'default' entries at downgrade time.
+    We don't know if the user had default entries when we started.
+    If they did, we wouldn't want to remove them.  So, the safest
+    thing to do is just leave the 'default' entries at downgrade time.
+    """
+    pass
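
As an aside, a minimal sketch (the connection URL is a placeholder, and the
SQLAlchemy implicit-execution style used above is assumed) of how an operator could
check the row this migration inserts:

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('mysql+pymysql://cinder:secret@dbhost/cinder')  # placeholder URL
    meta = MetaData()
    meta.bind = engine
    quota_classes = Table('quota_classes', meta, autoload=True)
    row = quota_classes.select().\
        where(quota_classes.c.resource == 'per_volume_gigabytes').\
        execute().first()
    # Expect class_name='default' and hard_limit=-1 if the migration ran and
    # no operator-defined entry was already present.
    print(row)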
diff --git a/cinder/exception.py b/cinder/exception.py
index 5f29fe485f7c1cef083ea6445f4e8b5aceb451ab..d3af7c47a9c95f3628136f9a61e2fa593a71062e 100644
@@ -455,6 +455,11 @@ class VolumeSizeExceedsAvailableQuota(QuotaError):
                 "%(consumed)sG has been consumed.")
 
 
+class VolumeSizeExceedsLimit(QuotaError):
+    message = _("Requested volume size %(size)d is larger than "
+                "maximum allowed limit %(limit)d.")
+
+
 class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
     message = _("Requested backup exceeds allowed Backup gigabytes "
                 "quota. Requested %(requested)sG, quota is %(quota)sG and "
diff --git a/cinder/quota.py b/cinder/quota.py
index 717955d68c7b25cb71e4668005b4ac4137ab95da..751d9fbdae7532698dd5f7c885864350dfba2544 100644
@@ -69,7 +69,10 @@ quota_opts = [
     cfg.BoolOpt('use_default_quota_class',
                 default=True,
                 help='Enables or disables use of default quota class '
-                     'with default quota.'), ]
+                     'with default quota.'),
+    cfg.IntOpt('per_volume_size_limit',
+               default=-1,
+               help='Max size allowed per volume, in gigabytes'), ]
 
 CONF = cfg.CONF
 CONF.register_opts(quota_opts)
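
Because quota_opts is registered without a group, the new option lives under
[DEFAULT] in cinder.conf. A minimal sketch of reading it back (assumes cinder.quota
is importable so the option is registered; not part of the patch):

    from oslo_config import cfg

    import cinder.quota  # noqa: importing registers quota_opts

    CONF = cfg.CONF
    limit = CONF.per_volume_size_limit
    print('no per-volume cap' if limit < 0 else 'per-volume cap: %d GB' % limit)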
@@ -523,7 +526,8 @@ class ReservableResource(BaseResource):
         """
 
         super(ReservableResource, self).__init__(name, flag=flag)
-        self.sync = sync
+        if sync:
+            self.sync = sync
 
 
 class AbsoluteResource(BaseResource):
@@ -869,6 +873,7 @@ class VolumeTypeQuotaEngine(QuotaEngine):
         result = {}
         # Global quotas.
         argses = [('volumes', '_sync_volumes', 'quota_volumes'),
+                  ('per_volume_gigabytes', None, 'per_volume_size_limit'),
                   ('snapshots', '_sync_snapshots', 'quota_snapshots'),
                   ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'),
                   ('backups', '_sync_backups', 'quota_backups'),
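
per_volume_gigabytes is registered with a None sync entry because there is nothing to
count in the database for it; that is why ReservableResource.__init__ now guards the
sync assignment. A self-contained mock of the pattern (not Cinder's actual QuotaEngine
internals):

    class MockResource(object):
        def __init__(self, name, sync=None, flag=None):
            self.name = name
            self.flag = flag
            if sync:  # mirrors the guard added in ReservableResource.__init__
                self.sync = sync

    argses = [('volumes', '_sync_volumes', 'quota_volumes'),
              ('per_volume_gigabytes', None, 'per_volume_size_limit')]
    resources = dict((name, MockResource(name, sync, flag))
                     for name, sync, flag in argses)
    assert resources['volumes'].sync == '_sync_volumes'
    assert not hasattr(resources['per_volume_gigabytes'], 'sync')
    # A resource without a sync function can only be consulted via limit_check();
    # it is never reserved against or synced from usage.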
diff --git a/cinder/tests/unit/api/contrib/test_quotas.py b/cinder/tests/unit/api/contrib/test_quotas.py
index 9e24657c0d4f1caec24573c2f2251cf0444006e0..591389650cb6d77740eba603c7b9add5d73b55ad 100644
@@ -30,12 +30,13 @@ from cinder import test
 
 def make_body(root=True, gigabytes=1000, snapshots=10,
               volumes=10, backups=10, backup_gigabytes=1000,
-              tenant_id='foo'):
+              tenant_id='foo', per_volume_gigabytes=-1):
     resources = {'gigabytes': gigabytes,
                  'snapshots': snapshots,
                  'volumes': volumes,
                  'backups': backups,
-                 'backup_gigabytes': backup_gigabytes}
+                 'backup_gigabytes': backup_gigabytes,
+                 'per_volume_gigabytes': per_volume_gigabytes, }
     # need to consider preexisting volume types as well
     volume_types = db.volume_type_get_all(context.get_admin_context())
     for volume_type in volume_types:
diff --git a/cinder/tests/unit/api/contrib/test_quotas_classes.py b/cinder/tests/unit/api/contrib/test_quotas_classes.py
index cab57f14149fd33a23d4f91a2b75f2bcc1fd8464..5ddb9838b5266ce9d5c5712a496745a6a1572778 100644
@@ -33,13 +33,14 @@ QUOTAS = quota.QUOTAS
 
 def make_body(root=True, gigabytes=1000, snapshots=10,
               volumes=10, backups=10,
-              backup_gigabytes=1000,
+              backup_gigabytes=1000, per_volume_gigabytes=-1,
               volume_types_faked=None,
               tenant_id='foo'):
     resources = {'gigabytes': gigabytes,
                  'snapshots': snapshots,
                  'volumes': volumes,
                  'backups': backups,
+                 'per_volume_gigabytes': per_volume_gigabytes,
                  'backup_gigabytes': backup_gigabytes}
     if not volume_types_faked:
         volume_types_faked = {'fake_type': None}
diff --git a/cinder/tests/unit/test_quota.py b/cinder/tests/unit/test_quota.py
index 0d543e02edd0dca26bd7a08d5869ef755dc0f891..4cc1681dffa225129f73bfd71f505e08a061d543 100644
@@ -96,6 +96,19 @@ class QuotaIntegrationTestCase(test.TestCase):
         backup['status'] = 'available'
         return db.backup_create(self.context, backup)
 
+    def test_volume_size_limit_exceeds(self):
+        resource = 'volumes_%s' % self.volume_type_name
+        db.quota_class_create(self.context, 'default', resource, 1)
+        flag_args = {
+            'quota_volumes': 10,
+            'quota_gigabytes': 1000,
+            'per_volume_size_limit': 5
+        }
+        self.flags(**flag_args)
+        self.assertRaises(exception.VolumeSizeExceedsLimit,
+                          volume.API().create,
+                          self.context, 10, '', '',)
+
     def test_too_many_volumes(self):
         volume_ids = []
         for _i in range(CONF.quota_volumes):
@@ -766,7 +779,8 @@ class VolumeTypeQuotaEngineTestCase(test.TestCase):
         engine = quota.VolumeTypeQuotaEngine()
         self.assertEqual(engine.resource_names,
                          ['backup_gigabytes', 'backups',
-                          'gigabytes', 'snapshots', 'volumes'])
+                          'gigabytes', 'per_volume_gigabytes',
+                          'snapshots', 'volumes'])
 
     def test_volume_type_resources(self):
         ctx = context.RequestContext('admin', 'admin', is_admin=True)
@@ -792,8 +806,10 @@ class VolumeTypeQuotaEngineTestCase(test.TestCase):
         self.assertEqual(engine.resource_names,
                          ['backup_gigabytes', 'backups',
                           'gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
-                          'snapshots', 'snapshots_type1', 'snapshots_type_2',
-                          'volumes', 'volumes_type1', 'volumes_type_2'])
+                          'per_volume_gigabytes', 'snapshots',
+                          'snapshots_type1', 'snapshots_type_2', 'volumes',
+                          'volumes_type1', 'volumes_type_2',
+                          ])
         db.volume_type_destroy(ctx, vtype['id'])
         db.volume_type_destroy(ctx, vtype2['id'])
 
@@ -834,7 +850,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                 snapshots=10,
                 gigabytes=1000,
                 backups=10,
-                backup_gigabytes=1000))
+                backup_gigabytes=1000,
+                per_volume_gigabytes=-1))
 
     def _stub_quota_class_get_default(self):
         # Stub out quota_class_get_default
@@ -873,7 +890,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                       gigabytes=500,
                                       snapshots=10,
                                       backups=10,
-                                      backup_gigabytes=500))
+                                      backup_gigabytes=500,
+                                      per_volume_gigabytes=-1))
 
     def test_get_class_quotas_no_defaults(self):
         self._stub_quota_class_get_all_by_name()
@@ -937,6 +955,9 @@ class DbQuotaDriverTestCase(test.TestCase):
                                       backup_gigabytes=dict(limit=50,
                                                             in_use=10,
                                                             reserved=0, ),
+                                      per_volume_gigabytes=dict(in_use=0,
+                                                                limit=-1,
+                                                                reserved=0)
                                       ))
 
     def test_get_project_quotas_alt_context_no_class(self):
@@ -964,6 +985,10 @@ class DbQuotaDriverTestCase(test.TestCase):
                                       backup_gigabytes=dict(limit=50,
                                                             in_use=10,
                                                             reserved=0, ),
+                                      per_volume_gigabytes=dict(in_use=0,
+                                                                limit=-1,
+                                                                reserved=0)
+
                                       ))
 
     def test_get_project_quotas_alt_context_with_class(self):
@@ -992,6 +1017,10 @@ class DbQuotaDriverTestCase(test.TestCase):
                                       backup_gigabytes=dict(limit=50,
                                                             in_use=10,
                                                             reserved=0, ),
+                                      per_volume_gigabytes=dict(in_use=0,
+                                                                limit=-1,
+                                                                reserved=0)
+
                                       ))
 
     def test_get_project_quotas_no_defaults(self):
@@ -1038,7 +1067,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                       snapshots=dict(limit=10, ),
                                       backups=dict(limit=10, ),
                                       gigabytes=dict(limit=50, ),
-                                      backup_gigabytes=dict(limit=50, ),))
+                                      backup_gigabytes=dict(limit=50, ),
+                                      per_volume_gigabytes=dict(limit=-1, )))
 
     def _stub_get_project_quotas(self):
         def fake_get_project_quotas(context, resources, project_id,
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index 79852531104f7d251e96cff56a5f7e1ead0f1dea..ad1ac41faa73710197d838471cb021fd4f80ca13 100644
@@ -571,6 +571,15 @@ class QuotaReserveTask(flow_utils.CinderTask):
         super(QuotaReserveTask, self).__init__(addons=[ACTION])
 
     def execute(self, context, size, volume_type_id, optional_args):
+        try:
+            values = {'per_volume_gigabytes': size}
+            QUOTAS.limit_check(context, project_id=context.project_id,
+                               **values)
+        except exception.OverQuota as e:
+            quotas = e.kwargs['quotas']
+            raise exception.VolumeSizeExceedsLimit(
+                size=size, limit=quotas['per_volume_gigabytes'])
+
         try:
             reserve_opts = {'volumes': 1, 'gigabytes': size}
             QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
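
Design note: the per-volume size is a pure limit, not a counted resource, so the flow
calls QUOTAS.limit_check() up front and translates the generic OverQuota into the more
specific VolumeSizeExceedsLimit before any volumes/gigabytes reservation is made. A
minimal standalone sketch of the same check (the helper name is illustrative, not part
of the patch; a valid RequestContext is assumed):

    from cinder import exception, quota

    QUOTAS = quota.QUOTAS

    def check_per_volume_limit(context, size):
        try:
            QUOTAS.limit_check(context, project_id=context.project_id,
                               per_volume_gigabytes=size)
        except exception.OverQuota as e:
            limit = e.kwargs['quotas']['per_volume_gigabytes']
            raise exception.VolumeSizeExceedsLimit(size=size, limit=limit)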