--- /dev/null
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from oslo_utils import timeutils
+from sqlalchemy import MetaData, Table
+
+from cinder.i18n import _LE
+
+# The default value comes from configuration: either the built-in
+# default of the per_volume_size_limit quota option, or the value the
+# user has set for that option in cinder.conf.
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ """Add default "per_volume_gigabytes" row into DB."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+ quota_classes = Table('quota_classes', meta, autoload=True)
+    rows = quota_classes.count().\
+ where(quota_classes.c.resource == 'per_volume_gigabytes').\
+ execute().scalar()
+
+    # Do not add an entry if a 'default' entry already exists in the
+    # database; we don't want to overwrite something the user added.
+    if rows:
+ return
+
+ try:
+        # Insert the default per_volume_gigabytes row; a hard_limit
+        # of -1 means the per-volume size is unlimited.
+ qci = quota_classes.insert()
+ qci.execute({'created_at': timeutils.utcnow(),
+ 'class_name': 'default',
+ 'resource': 'per_volume_gigabytes',
+ 'hard_limit': -1,
+ 'deleted': False, })
+ except Exception:
+ LOG.error(_LE("Default per_volume_gigabytes row not inserted "
+ "into the quota_classes."))
+ raise
+
+
+def downgrade(migrate_engine):
+ """Don't delete the 'default' entries at downgrade time.
+ We don't know if the user had default entries when we started.
+ If they did, we wouldn't want to remove them. So, the safest
+ thing to do is just leave the 'default' entries at downgrade time.
+ """
+ pass
"%(consumed)sG has been consumed.")
+class VolumeSizeExceedsLimit(QuotaError):
+ message = _("Requested volume size %(size)d is larger than "
+ "maximum allowed limit %(limit)d.")
+
+
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
message = _("Requested backup exceeds allowed Backup gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
cfg.BoolOpt('use_default_quota_class',
default=True,
help='Enables or disables use of default quota class '
- 'with default quota.'), ]
+ 'with default quota.'),
+ cfg.IntOpt('per_volume_size_limit',
+ default=-1,
+ help='Max size allowed per volume, in gigabytes'), ]
CONF = cfg.CONF
CONF.register_opts(quota_opts)
"""
super(ReservableResource, self).__init__(name, flag=flag)
- self.sync = sync
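+        # sync may be None for resources (e.g. per_volume_gigabytes)
+        # that have no usage records to synchronize.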
+ if sync:
+ self.sync = sync
class AbsoluteResource(BaseResource):
result = {}
# Global quotas.
argses = [('volumes', '_sync_volumes', 'quota_volumes'),
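+                  # per_volume_gigabytes has no sync function; it is
+                  # enforced through limit_check() rather than being
+                  # tracked in quota usages.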
+ ('per_volume_gigabytes', None, 'per_volume_size_limit'),
('snapshots', '_sync_snapshots', 'quota_snapshots'),
('gigabytes', '_sync_gigabytes', 'quota_gigabytes'),
('backups', '_sync_backups', 'quota_backups'),
def make_body(root=True, gigabytes=1000, snapshots=10,
volumes=10, backups=10, backup_gigabytes=1000,
- tenant_id='foo'):
+ tenant_id='foo', per_volume_gigabytes=-1):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
'volumes': volumes,
'backups': backups,
- 'backup_gigabytes': backup_gigabytes}
+ 'backup_gigabytes': backup_gigabytes,
+ 'per_volume_gigabytes': per_volume_gigabytes, }
# need to consider preexisting volume types as well
volume_types = db.volume_type_get_all(context.get_admin_context())
for volume_type in volume_types:
def make_body(root=True, gigabytes=1000, snapshots=10,
volumes=10, backups=10,
- backup_gigabytes=1000,
+ backup_gigabytes=1000, per_volume_gigabytes=-1,
volume_types_faked=None,
tenant_id='foo'):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
'volumes': volumes,
'backups': backups,
+ 'per_volume_gigabytes': per_volume_gigabytes,
'backup_gigabytes': backup_gigabytes}
if not volume_types_faked:
volume_types_faked = {'fake_type': None}
backup['status'] = 'available'
return db.backup_create(self.context, backup)
+ def test_volume_size_limit_exceeds(self):
+ resource = 'volumes_%s' % self.volume_type_name
+ db.quota_class_create(self.context, 'default', resource, 1)
+ flag_args = {
+ 'quota_volumes': 10,
+ 'quota_gigabytes': 1000,
+ 'per_volume_size_limit': 5
+ }
+ self.flags(**flag_args)
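+        # Creating a 10G volume must fail, since per_volume_size_limit
+        # is set to 5 above.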
+ self.assertRaises(exception.VolumeSizeExceedsLimit,
+ volume.API().create,
+ self.context, 10, '', '',)
+
def test_too_many_volumes(self):
volume_ids = []
for _i in range(CONF.quota_volumes):
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(engine.resource_names,
['backup_gigabytes', 'backups',
- 'gigabytes', 'snapshots', 'volumes'])
+ 'gigabytes', 'per_volume_gigabytes',
+ 'snapshots', 'volumes'])
def test_volume_type_resources(self):
ctx = context.RequestContext('admin', 'admin', is_admin=True)
self.assertEqual(engine.resource_names,
['backup_gigabytes', 'backups',
'gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
- 'snapshots', 'snapshots_type1', 'snapshots_type_2',
- 'volumes', 'volumes_type1', 'volumes_type_2'])
+ 'per_volume_gigabytes', 'snapshots',
+ 'snapshots_type1', 'snapshots_type_2', 'volumes',
+                          'volumes_type1', 'volumes_type_2'])
db.volume_type_destroy(ctx, vtype['id'])
db.volume_type_destroy(ctx, vtype2['id'])
snapshots=10,
gigabytes=1000,
backups=10,
- backup_gigabytes=1000))
+ backup_gigabytes=1000,
+ per_volume_gigabytes=-1))
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
gigabytes=500,
snapshots=10,
backups=10,
- backup_gigabytes=500))
+ backup_gigabytes=500,
+ per_volume_gigabytes=-1))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
+ per_volume_gigabytes=dict(in_use=0,
+ limit=-1,
+                                      reserved=0)
))
def test_get_project_quotas_alt_context_no_class(self):
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
+ per_volume_gigabytes=dict(in_use=0,
+ limit=-1,
+ reserved=0)
))
def test_get_project_quotas_alt_context_with_class(self):
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
+ per_volume_gigabytes=dict(in_use=0,
+ limit=-1,
+                                      reserved=0)
))
def test_get_project_quotas_no_defaults(self):
snapshots=dict(limit=10, ),
backups=dict(limit=10, ),
gigabytes=dict(limit=50, ),
- backup_gigabytes=dict(limit=50, ),))
+ backup_gigabytes=dict(limit=50, ),
+ per_volume_gigabytes=dict(limit=-1, )))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
super(QuotaReserveTask, self).__init__(addons=[ACTION])
def execute(self, context, size, volume_type_id, optional_args):
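+        # Check the requested size against the per-volume limit before
+        # reserving anything; limit_check() raises OverQuota when a
+        # value exceeds its configured limit.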
+ try:
+ values = {'per_volume_gigabytes': size}
+ QUOTAS.limit_check(context, project_id=context.project_id,
+ **values)
+ except exception.OverQuota as e:
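+            # Re-raise as VolumeSizeExceedsLimit so callers get a
+            # specific, user-facing error.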
+ quotas = e.kwargs['quotas']
+ raise exception.VolumeSizeExceedsLimit(
+ size=size, limit=quotas['per_volume_gigabytes'])
+
try:
reserve_opts = {'volumes': 1, 'gigabytes': size}
QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)