return dict(quota_set=quota_set)
def _validate_existing_resource(self, key, value, quota_values):
- if key == 'per_volume_gigabytes':
+ # -1 limit will always be greater than the existing value
+ if key == 'per_volume_gigabytes' or value == -1:
return
v = quota_values.get(key, {})
- if value < (v.get('in_use', 0) + v.get('reserved', 0)):
+ used = (v.get('in_use', 0) + v.get('reserved', 0))
+ if QUOTAS.using_nested_quotas():
+ used += v.get('allocated', 0)
+ if value < used:
msg = _("Quota %s limit must be equal or greater than existing "
"resources.") % key
raise webob.exc.HTTPBadRequest(explanation=msg)
- def _validate_quota_limit(self, quota, key, project_quotas=None,
- parent_project_quotas=None):
- limit = self.validate_integer(quota[key], key, min_value=-1,
- max_value=db.MAX_INT)
-
- # If a parent quota is unlimited (-1) no validation needs to happen
- # for the amount of existing free quota
- # TODO(mc_nair): will need to recurse up for nested quotas once
- # -1 child project values are enabled
- if parent_project_quotas and parent_project_quotas[key]['limit'] != -1:
- free_quota = (parent_project_quotas[key]['limit'] -
- parent_project_quotas[key]['in_use'] -
- parent_project_quotas[key]['reserved'] -
- parent_project_quotas[key].get('allocated', 0))
-
- current = 0
- if project_quotas.get(key):
- current = project_quotas[key]['limit']
- # -1 limit doesn't change free quota available in parent
- if current == -1:
- current = 0
-
- # Add back the existing quota limit (if any is set) from the
- # current free quota since it will be getting reset and is part
- # of the parent's allocated value
- free_quota += current
-
- if limit > free_quota:
- msg = _("Free quota available is %s.") % free_quota
- raise webob.exc.HTTPBadRequest(explanation=msg)
- return limit
-
def _get_quotas(self, context, id, usages=False):
values = QUOTAS.get_project_quotas(context, id, usages=usages)
# Get the parent_id of the target project to verify whether we are
# dealing with hierarchical namespace or non-hierarchical namespace
target_project = quota_utils.get_project_hierarchy(
- context, target_project_id)
+ context, target_project_id, parents_as_ids=True)
parent_id = target_project.parent_id
if parent_id:
self._authorize_update_or_delete(context_project,
target_project.id,
parent_id)
- parent_project_quotas = QUOTAS.get_project_quotas(
- context, parent_id)
# NOTE(ankit): Pass #2 - In this loop for body['quota_set'].keys(),
# we validate the quota limits to ensure that we can bail out if
quota_values = QUOTAS.get_project_quotas(context, target_project_id,
defaults=False)
valid_quotas = {}
- allocated_quotas = {}
+ reservations = []
for key in body['quota_set'].keys():
if key in NON_QUOTA_KEYS:
continue
- if not skip_flag:
- self._validate_existing_resource(key, value, quota_values)
+ value = self.validate_integer(
+ body['quota_set'][key], key, min_value=-1,
+ max_value=db.MAX_INT)
- if use_nested_quotas and parent_id:
- value = self._validate_quota_limit(body['quota_set'], key,
- quota_values,
- parent_project_quotas)
-
- if value < 0:
- # TODO(mc_nair): extend to handle -1 limits and recurse up
- # the hierarchy
- msg = _("Quota can't be set to -1 for child projects.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
+ # Can't skip the validation of nested quotas since it could mess
+ # up the hierarchy if a parent limit is less than the children's
+ # current usage
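+ # (e.g. lowering a parent's limit to 5 while its children already
+ # use 7 would leave the hierarchy over-committed)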
+ if not skip_flag or use_nested_quotas:
+ self._validate_existing_resource(key, value, quota_values)
- original_quota = 0
- if quota_values.get(key):
- original_quota = quota_values[key]['limit']
+ if use_nested_quotas:
+ try:
+ reservations += self._update_nested_quota_allocated(
+ context, target_project, quota_values, key, value)
+ except exception.OverQuota as e:
+ if reservations:
+ db.reservation_rollback(context, reservations)
+ raise webob.exc.HTTPBadRequest(explanation=e.message)
- allocated_quotas[key] = (
- parent_project_quotas[key].get('allocated', 0) + value -
- original_quota)
- else:
- value = self._validate_quota_limit(body['quota_set'], key)
valid_quotas[key] = value
# NOTE(ankit): Pass #3 - At this point we know that all the keys and
db.quota_create(context, target_project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
- # If hierarchical projects, update child's quota first
- # and then parents quota. In future this needs to be an
- # atomic operation.
- if use_nested_quotas and parent_id:
- if key in allocated_quotas.keys():
- try:
- db.quota_allocated_update(context, parent_id, key,
- allocated_quotas[key])
- except exception.ProjectQuotaNotFound:
- parent_limit = parent_project_quotas[key]['limit']
- db.quota_create(context, parent_id, key, parent_limit,
- allocated=allocated_quotas[key])
+ if reservations:
+ db.reservation_commit(context, reservations)
return {'quota_set': self._get_quotas(context, target_project_id)}
+ def _get_quota_usage(self, quota_obj):
+ return (quota_obj.get('in_use', 0) + quota_obj.get('allocated', 0) +
+ quota_obj.get('reserved', 0))
+
+ def _update_nested_quota_allocated(self, ctxt, target_project,
+ target_project_quotas, res, new_limit):
+ reservations = []
+ # per_volume_gigabytes doesn't make sense to nest
+ if res == "per_volume_gigabytes":
+ return reservations
+
+ quota_for_res = target_project_quotas.get(res, {})
+ orig_quota_from_target_proj = quota_for_res.get('limit', 0)
+ # If limit was -1, we were "taking" current child's usage from parent
+ if orig_quota_from_target_proj == -1:
+ orig_quota_from_target_proj = self._get_quota_usage(quota_for_res)
+
+ new_quota_from_target_proj = new_limit
+ # If we set limit to -1, we will "take" the current usage from parent
+ if new_limit == -1:
+ new_quota_from_target_proj = self._get_quota_usage(quota_for_res)
+
+ res_change = new_quota_from_target_proj - orig_quota_from_target_proj
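+ # e.g. (hypothetical values) a child moving from limit=5 to
+ # limit=-1 with 2 in use gives res_change = 2 - 5 = -3, lowering
+ # the parent's allocated by 3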
+ if res_change != 0:
+ deltas = {res: res_change}
+ reservations += quota_utils.update_alloc_to_next_hard_limit(
+ ctxt, QUOTAS.resources, deltas, res, None, target_project.id)
+
+ return reservations
+
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
context = req.environ['cinder.context']
# If the project which is being deleted has allocated part of its
# quota to its subprojects, then subprojects' quotas should be
# deleted first.
- for key, value in project_quotas.items():
- if 'allocated' in project_quotas[key].keys():
- if project_quotas[key]['allocated'] != 0:
+ for res, value in project_quotas.items():
+ if 'allocated' in project_quotas[res].keys():
+ if project_quotas[res]['allocated'] != 0:
msg = _("About to delete child projects having "
"non-zero quota. This should not be performed")
raise webob.exc.HTTPBadRequest(explanation=msg)
self._authorize_update_or_delete(context_project,
target_project.id,
parent_id)
- parent_project_quotas = QUOTAS.get_project_quotas(
- ctxt, parent_id)
- # Delete child quota first and later update parent's quota.
try:
db.quota_destroy_by_project(ctxt, target_project.id)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
- # The parent "gives" quota to its child using the "allocated" value
- # and since the child project is getting deleted, we should restore
- # the child projects quota to the parent quota, but lowering it's
- # allocated value
- for key, value in project_quotas.items():
- project_hard_limit = project_quotas[key]['limit']
- parent_allocated = parent_project_quotas[key]['allocated']
- parent_allocated -= project_hard_limit
- db.quota_allocated_update(ctxt, parent_id, key,
- parent_allocated)
+ for res, limit in project_quotas.items():
+ # Update child limit to 0 so the parent hierarchy gets its
+ # allocated values updated properly
+ self._update_nested_quota_allocated(
+ ctxt, target_project, project_quotas, res, 0)
def validate_setup_for_nested_quota_use(self, req):
"""Validates that the setup supports using nested quotas.
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age, project_id=None):
+ until_refresh, max_age, project_id=None,
+ is_allocated_reserve=False):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age, project_id=project_id)
+ until_refresh, max_age, project_id=project_id,
+ is_allocated_reserve=is_allocated_reserve)
def reservation_commit(context, reservations, project_id=None):
def _reservation_create(context, uuid, usage, project_id, resource, delta,
- expire, session=None):
+ expire, session=None, allocated_id=None):
+ usage_id = usage['id'] if usage else None
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
- reservation_ref.usage_id = usage['id']
+ reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
+ reservation_ref.allocated_id = allocated_id
reservation_ref.save(session=session)
return reservation_ref
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age, project_id=None):
+ until_refresh, max_age, project_id=None,
+ is_allocated_reserve=False):
elevated = context.elevated()
session = get_session()
with session.begin():
# a best-effort mechanism.
# Check for deltas that would go negative
- unders = [r for r, delta in deltas.items()
- if delta < 0 and delta + usages[r].in_use < 0]
+ if is_allocated_reserve:
+ unders = [r for r, delta in deltas.items()
+ if delta < 0 and delta + allocated.get(r, 0) < 0]
+ else:
+ unders = [r for r, delta in deltas.items()
+ if delta < 0 and delta + usages[r].in_use < 0]
+
+ # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
if not overs:
reservations = []
for resource, delta in deltas.items():
- reservation = _reservation_create(elevated,
- str(uuid.uuid4()),
- usages[resource],
- project_id,
- resource, delta, expire,
- session=session)
+ usage = usages[resource]
+ allocated_id = None
+ if is_allocated_reserve:
+ try:
+ quota = _quota_get(context, project_id, resource,
+ session=session)
+ except exception.ProjectQuotaNotFound:
+ # If we were using the default quota, create DB entry
+ quota = quota_create(context, project_id, resource,
+ quotas[resource], 0)
+ # Since there's no reserved/total for allocated, update
+ # allocated immediately and subtract on rollback if needed
+ quota_allocated_update(context, project_id, resource,
+ quota.allocated + delta)
+ allocated_id = quota.id
+ usage = None
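+ # e.g. (hypothetical) delta=+2 against allocated=3 writes
+ # allocated=5 immediately; rollback later subtracts the delta
+ # via reservation.quota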
+ reservation = _reservation_create(
+ elevated, str(uuid.uuid4()), usage, project_id, resource,
+ delta, expire, session=session, allocated_id=allocated_id)
+
reservations.append(reservation.uuid)
# Also update the reserved quantity
#
# To prevent this, we only update the
# reserved value if the delta is positive.
- if delta > 0:
+ if delta > 0 and not is_allocated_reserve:
usages[resource].reserved += delta
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
- usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
+ usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
+ allocated=allocated.get(k, 0))
for k, v in usages.items()}
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages=usages)
usages = _dict_with_usage_id(usages)
for reservation in _quota_reservations(session, context, reservations):
- usage = usages[reservation.usage_id]
- if reservation.delta >= 0:
- usage.reserved -= reservation.delta
- usage.in_use += reservation.delta
+ # Allocated reservations will have already been bumped
+ if not reservation.allocated_id:
+ usage = usages[reservation.usage_id]
+ if reservation.delta >= 0:
+ usage.reserved -= reservation.delta
+ usage.in_use += reservation.delta
reservation.delete(session=session)
usages = _get_quota_usages(context, session, project_id)
usages = _dict_with_usage_id(usages)
for reservation in _quota_reservations(session, context, reservations):
- usage = usages[reservation.usage_id]
- if reservation.delta >= 0:
- usage.reserved -= reservation.delta
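+ # Allocated reserves bumped quota.allocated at reserve time, so
+ # roll back by subtracting the delta directly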
+ if reservation.allocated_id:
+ reservation.quota.allocated -= reservation.delta
+ else:
+ usage = usages[reservation.usage_id]
+ if reservation.delta >= 0:
+ usage.reserved -= reservation.delta
reservation.delete(session=session)
if results:
for reservation in results:
if reservation.delta >= 0:
- reservation.usage.reserved -= reservation.delta
- reservation.usage.save(session=session)
+ if reservation.allocated_id:
+ reservation.quota.allocated -= reservation.delta
+ reservation.quota.save(session=session)
+ else:
+ reservation.usage.reserved -= reservation.delta
+ reservation.usage.save(session=session)
reservation.delete(session=session)
--- /dev/null
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData, Integer, Table, ForeignKey
+
+
+def upgrade(migrate_engine):
+ """Add allocated_id to the reservations table."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ reservations = Table('reservations', meta, autoload=True)
+ Table('quotas', meta, autoload=True)
+ allocated_id = Column('allocated_id', Integer, ForeignKey('quotas.id'),
+ nullable=True)
+ reservations.create_column(allocated_id)
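+ # usage_id becomes nullable so reservations that only track an
+ # allocated delta (allocated_id set) need no quota_usages row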
+ usage_id = reservations.c.usage_id
+ usage_id.alter(nullable=True)
id = Column(Integer, primary_key=True)
uuid = Column(String(36), nullable=False)
- usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
+ usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=True)
+ allocated_id = Column(Integer, ForeignKey('quotas.id'), nullable=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
'QuotaUsage.deleted == 0)')
+ quota = relationship(
+ "Quota",
+ foreign_keys=allocated_id,
+ primaryjoin='and_(Reservation.allocated_id == Quota.id)')
class Snapshot(BASE, CinderBase):
# quotas, but that's a pretty rare thing.
quotas = self._get_quotas(context, resources, deltas.keys(),
has_sync=True, project_id=project_id)
+ return self._reserve(context, resources, quotas, deltas, expire,
+ project_id)
+ def _reserve(self, context, resources, quotas, deltas, expire, project_id):
# NOTE(Vek): Most of the work here has to be done in the DB
# API, because we have to do it in a transaction,
# which means access to the session. Since the
"""Ensures project_tree has quotas that make sense as nested quotas.
Validates the following:
- * No child projects have a limit of -1
* No parent project has child_projects who have more combined quota
than the parent's quota limit
* No child quota has a larger in-use value than its current limit
(could happen before because child default values weren't enforced)
* All parent projects' "allocated" quotas match the sum of the limits
of its children projects
+
+ TODO(mc_nair): need a better way to "flip the switch" to use nested
+ quotas to make this less racy
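+
+ For example (hypothetical numbers): a parent with limit 10 and
+ usage 4 has 6 free, so children whose combined limits and usage
+ total 8 will fail validation until they are lowered.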
"""
+ self._allocated = {}
project_queue = deque(project_tree.items())
borked_allocated_quotas = {}
while project_queue:
# Tuple of (current root node, subtree)
- cur_project_id, project_subtree = project_queue.popleft()
+ cur_proj_id, project_subtree = project_queue.popleft()
# If we're on a leaf node, no need to do validation on it, and in
# order to avoid complication trying to get its children, skip it.
continue
cur_project_quotas = self.get_project_quotas(
- ctxt, resources, cur_project_id)
-
- child_project_ids = project_subtree.keys()
- child_project_quotas = {child_id: self.get_project_quotas(
- ctxt, resources, child_id) for child_id in child_project_ids}
+ ctxt, resources, cur_proj_id)
# Validate each resource when compared to its child quotas
for resource in cur_project_quotas.keys():
- child_limit_sum = 0
- for child_id, child_quota in child_project_quotas.items():
- child_limit = child_quota[resource]['limit']
- # Don't want to continue validation if -1 limit for child
- # TODO(mc_nair) - remove when allowing -1 for subprojects
- if child_limit < 0:
- msg = _("Quota limit is -1 for child project "
- "'%(proj)s' for resource '%(res)s'") % {
- 'proj': child_id, 'res': resource
- }
- raise exception.InvalidNestedQuotaSetup(reason=msg)
- # Handle the case that child default quotas weren't being
- # properly enforced before
- elif child_quota[resource].get('in_use', 0) > child_limit:
- msg = _("Quota limit invalid for project '%(proj)s' "
- "for resource '%(res)s': limit of %(limit)d "
- "is less than in-use value of %(used)d") % {
- 'proj': child_id, 'res': resource,
- 'limit': child_limit,
- 'used': child_quota[resource]['in_use']
- }
- raise exception.InvalidNestedQuotaSetup(reason=msg)
-
- child_limit_sum += child_quota[resource]['limit']
-
parent_quota = cur_project_quotas[resource]
parent_limit = parent_quota['limit']
- parent_usage = parent_quota['in_use']
- parent_allocated = parent_quota.get('allocated', 0)
+ parent_usage = (parent_quota['in_use'] +
+ parent_quota['reserved'])
+
+ cur_parent_allocated = parent_quota.get('allocated', 0)
+ calc_parent_allocated = self._get_cur_project_allocated(
+ ctxt, resources[resource], {cur_proj_id: project_subtree})
if parent_limit > 0:
parent_free_quota = parent_limit - parent_usage
- if parent_free_quota < child_limit_sum:
- msg = _("Sum of child limits '%(sum)s' is greater "
+ if parent_free_quota < calc_parent_allocated:
+ msg = _("Sum of child usage '%(sum)s' is greater "
"than free quota of '%(free)s' for project "
"'%(proj)s' for resource '%(res)s'. Please "
- "lower the limit for one or more of the "
- "following projects: '%(child_ids)s'") % {
- 'sum': child_limit_sum, 'free': parent_free_quota,
- 'proj': cur_project_id, 'res': resource,
- 'child_ids': ', '.join(child_project_ids)
+ "lower the limit or usage for one or more of "
+ "the following projects: '%(child_ids)s'") % {
+ 'sum': calc_parent_allocated,
+ 'free': parent_free_quota,
+ 'proj': cur_proj_id, 'res': resource,
+ 'child_ids': ', '.join(project_subtree.keys())
}
raise exception.InvalidNestedQuotaSetup(reason=msg)
- # Deal with the fact that using -1 limits in the past may
- # have messed some allocated values in DB
- if parent_allocated != child_limit_sum:
- # Decide whether to fix the allocated val or just
- # keep track of what's messed up
+ # If "allocated" value wasn't right either err or fix DB
+ if calc_parent_allocated != cur_parent_allocated:
if fix_allocated_quotas:
try:
- db.quota_allocated_update(ctxt, cur_project_id,
+ db.quota_allocated_update(ctxt, cur_proj_id,
resource,
- child_limit_sum)
+ calc_parent_allocated)
except exception.ProjectQuotaNotFound:
- # Handles the case that the project is using
- # default quota value so nothing present to update
+ # If it was the default quota, create a DB entry for it
db.quota_create(
- ctxt, cur_project_id, resource,
- parent_limit, allocated=child_limit_sum)
+ ctxt, cur_proj_id, resource,
+ parent_limit, allocated=calc_parent_allocated)
else:
- if cur_project_id not in borked_allocated_quotas:
- borked_allocated_quotas[cur_project_id] = {}
+ if cur_proj_id not in borked_allocated_quotas:
+ borked_allocated_quotas[cur_proj_id] = {}
- borked_allocated_quotas[cur_project_id][resource] = {
- 'db_allocated_quota': parent_allocated,
- 'expected_allocated_quota': child_limit_sum}
+ borked_allocated_quotas[cur_proj_id][resource] = {
+ 'db_allocated_quota': cur_parent_allocated,
+ 'expected_allocated_quota': calc_parent_allocated}
project_queue.extend(project_subtree.items())
"project quotas: %s") % borked_allocated_quotas
raise exception.InvalidNestedQuotaSetup(message=msg)
+ def _get_cur_project_allocated(self, ctxt, resource, project_tree):
+ """Recursively calculates the allocated value of a project
+
+ :param ctxt: context used to retrieve DB values
+ :param resource: the resource to calculate allocated value for
+ :param project_tree: the project tree used to calculate allocated
+ e.g. {'A': {'B': {'D': None}, 'C': None}
+
+ A project's "allocated" value depends on:
+ 1) the quota limits which have been "given" to its children, in
+ the case those limits are not unlimited (-1)
+ 2) the current quota being used by a child plus whatever the child
+ has given to its children, in the case of unlimited (-1) limits
+
+ Scenario #2 requires recursively calculating allocated, and in order
+ to efficiently calculate things we will save off any previously
+ calculated allocated values.
+
+ NOTE: this currently leaves a race condition: once a project's
+ allocated value has been calculated (with a -1 limit), a child project
+ may get a volume created, changing the in-use value and invalidating
+ the calculated allocated value. We should look into updating the
+ allocated values as we go and switching to the NestedQuotaDriver with
+ the flip of a switch.
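+
+ Example (hypothetical tree {'A': {'B': None, 'C': None}}): if B
+ has a limit of 5 and C has a -1 limit with 2 in use, A's
+ allocated value for the resource is 5 + 2 = 7.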
+ """
+ # Grab the current node
+ cur_project_id = list(project_tree)[0]
+ project_subtree = project_tree[cur_project_id]
+ res_name = resource.name
+
+ if cur_project_id not in self._allocated:
+ self._allocated[cur_project_id] = {}
+
+ if res_name not in self._allocated[cur_project_id]:
+ # Calculate the allocated value for this resource since we
+ # haven't done so yet
+ cur_project_allocated = 0
+ child_proj_ids = project_subtree.keys() if project_subtree else {}
+ res_dict = {res_name: resource}
+ child_project_quotas = {child_id: self.get_project_quotas(
+ ctxt, res_dict, child_id) for child_id in child_proj_ids}
+
+ for child_id, child_quota in child_project_quotas.items():
+ child_limit = child_quota[res_name]['limit']
+ # Non-unlimited quota is easy: anything explicitly given to a
+ # child project gets added to the allocated value
+ if child_limit != -1:
+ if child_quota[res_name].get('in_use', 0) > child_limit:
+ msg = _("Quota limit invalid for project '%(proj)s' "
+ "for resource '%(res)s': limit of %(limit)d "
+ "is less than in-use value of %(used)d") % {
+ 'proj': child_id, 'res': res_name,
+ 'limit': child_limit,
+ 'used': child_quota[res_name]['in_use']
+ }
+ raise exception.InvalidNestedQuotaSetup(reason=msg)
+
+ cur_project_allocated += child_limit
+ # For -1, take any quota being used by the child, as well as
+ # what the child itself has given out to its children
+ else:
+ child_in_use = child_quota[res_name].get('in_use', 0)
+ # Recursively calculate child's allocated
+ child_alloc = self._get_cur_project_allocated(
+ ctxt, resource, {child_id: project_subtree[child_id]})
+ cur_project_allocated += child_in_use + child_alloc
+
+ self._allocated[cur_project_id][res_name] = cur_project_allocated
+
+ return self._allocated[cur_project_id][res_name]
+
def get_default(self, context, resource, project_id):
"""Get a specific default quota for a resource."""
resource = super(NestedDbQuotaDriver, self).get_default(
defaults[key] = 0
return defaults
+ def _reserve(self, context, resources, quotas, deltas, expire, project_id):
+ reserved = []
+ # So as not to change the exception behavior, flag every resource
+ # that would be over quota instead of failing on the first OverQuota
+ resources_failed_to_update = []
+ failed_usages = {}
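+ # e.g. (hypothetical) deltas {'volumes': 1, 'gigabytes': 1} where
+ # only gigabytes is over quota: the volumes reservation is still
+ # rolled back and OverQuota reports just 'gigabytes'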
+ for res in deltas.keys():
+ try:
+ reserved += db.quota_reserve(
+ context, resources, quotas, {res: deltas[res]},
+ expire, CONF.until_refresh, CONF.max_age, project_id)
+ if quotas[res] == -1:
+ reserved += quota_utils.update_alloc_to_next_hard_limit(
+ context, resources, deltas, res, expire, project_id)
+ except exception.OverQuota as e:
+ resources_failed_to_update.append(res)
+ failed_usages.update(e.kwargs['usages'])
+
+ if resources_failed_to_update:
+ db.reservation_rollback(context, reserved, project_id)
+ # We change OverQuota to OverVolumeLimit in other places and expect
+ # to find all of the OverQuota kwargs
+ raise exception.OverQuota(overs=sorted(resources_failed_to_update),
+ quotas=quotas, usages=failed_usages)
+
+ return reserved
+
class BaseResource(object):
"""Describe a single resource for quota checking."""
def get_by_project(self, context, project_id, resource_name):
"""Get a specific quota by project."""
-
return self._driver.get_by_project(context, project_id, resource_name)
+ def get_by_project_or_default(self, context, project_id, resource_name):
+ """Get specific quota by project or default quota if doesn't exists."""
+ try:
+ val = self.get_by_project(
+ context, project_id, resource_name).hard_limit
+ except exception.ProjectQuotaNotFound:
+ val = self.get_defaults(context, project_id)[resource_name]
+
+ return val
+
def get_by_class(self, context, quota_class, resource_name):
"""Get a specific quota by quota class."""
from keystoneclient import exceptions
from keystoneclient import session
+from cinder import db
from cinder import exception
from cinder.i18n import _, _LW
return reservations
-def get_project_hierarchy(context, project_id, subtree_as_ids=False):
+def get_project_hierarchy(context, project_id, subtree_as_ids=False,
+ parents_as_ids=False):
"""A Helper method to get the project hierarchy.
Along with hierarchical multitenancy in keystone API v3, projects can be
generic_project = GenericProjectInfo(project_id, keystone.version)
if keystone.version == 'v3':
project = keystone.projects.get(project_id,
- subtree_as_ids=subtree_as_ids)
+ subtree_as_ids=subtree_as_ids,
+ parents_as_ids=parents_as_ids)
generic_project.parent_id = project.parent_id
generic_project.subtree = (
project.subtree if subtree_as_ids else None)
+ generic_project.parents = (
+ project.parents if parents_as_ids else None)
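+ # parents is a nested dict of project ids up the chain,
+ # e.g. {parent_id: {grandparent_id: None}}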
except exceptions.NotFound:
msg = (_("Tenant ID: %s does not exist.") % project_id)
raise webob.exc.HTTPNotFound(explanation=msg)
return project_roots
+def update_alloc_to_next_hard_limit(context, resources, deltas, res,
+ expire, project_id):
+ from cinder import quota
+ QUOTAS = quota.QUOTAS
+ reservations = []
+ projects = get_project_hierarchy(context, project_id,
+ parents_as_ids=True).parents
+ hard_limit_found = False
+ # Update allocated values up the chain until we hit a hard limit or
+ # run out of parents
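+ # e.g. (hypothetical) for project D with parents {B: {A: None}},
+ # a +1 delta bumps allocated on B (limit -1) and then on A,
+ # stopping there because A has a hard limit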
+ while projects and not hard_limit_found:
+ cur_proj_id = list(projects)[0]
+ projects = projects[cur_proj_id]
+ cur_quota_lim = QUOTAS.get_by_project_or_default(
+ context, cur_proj_id, res)
+ hard_limit_found = (cur_quota_lim != -1)
+ cur_quota = {res: cur_quota_lim}
+ cur_delta = {res: deltas[res]}
+ try:
+ reservations += db.quota_reserve(
+ context, resources, cur_quota, cur_delta, expire,
+ CONF.until_refresh, CONF.max_age, cur_proj_id,
+ is_allocated_reserve=True)
+ except exception.OverQuota:
+ db.reservation_rollback(context, reservations)
+ raise
+ return reservations
+
+
def validate_setup_for_nested_quota_use(ctxt, resources,
nested_quota_driver,
fix_allocated_quotas=False):
from cinder.api.contrib import quotas
from cinder import context
from cinder import db
+from cinder import exception
from cinder import quota
from cinder import test
from cinder.tests.unit import test_db_api
def make_body(root=True, gigabytes=1000, snapshots=10,
volumes=10, backups=10, backup_gigabytes=1000,
- tenant_id='foo', per_volume_gigabytes=-1, is_child=False):
+ tenant_id='foo', per_volume_gigabytes=-1):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
'volumes': volumes,
# need to consider preexisting volume types as well
volume_types = db.volume_type_get_all(context.get_admin_context())
- if not is_child:
- for volume_type in volume_types:
- resources['gigabytes_' + volume_type] = -1
- resources['snapshots_' + volume_type] = -1
- resources['volumes_' + volume_type] = -1
- elif per_volume_gigabytes < 0:
- # In the case that we're dealing with a child project, we aren't
- # allowing -1 limits for the time being, so hack this to some large
- # enough value for the tests that it's essentially unlimited
- # TODO(mc_nair): remove when -1 limits for child projects are allowed
- resources['per_volume_gigabytes'] = 10000
+ for volume_type in volume_types:
+ resources['gigabytes_' + volume_type] = -1
+ resources['snapshots_' + volume_type] = -1
+ resources['volumes_' + volume_type] = -1
if tenant_id:
resources['id'] = tenant_id
self.id = id
self.parent_id = parent_id
self.subtree = None
+ self.parents = None
def setUp(self):
super(QuotaSetsControllerTestBase, self).setUp()
self.req.params = {}
self._create_project_hierarchy()
+ self.req.environ['cinder.context'].project_id = self.A.id
get_patcher = mock.patch('cinder.quota_utils.get_project_hierarchy',
self._get_project)
self.B.subtree = {self.D.id: self.D.subtree}
self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree}
+ self.A.parents = None
+ self.B.parents = {self.A.id: None}
+ self.C.parents = {self.A.id: None}
+ self.D.parents = {self.B.id: self.B.parents}
+
# project_by_id attribute is used to recover a project based on its id.
self.project_by_id = {self.A.id: self.A, self.B.id: self.B,
self.C.id: self.C, self.D.id: self.D}
- def _get_project(self, context, id, subtree_as_ids=False):
+ def _get_project(self, context, id, subtree_as_ids=False,
+ parents_as_ids=False):
return self.project_by_id.get(id, self.FakeProject())
+ def _create_fake_quota_usages(self, usage_map):
+ self._fake_quota_usages = {}
+ for key, val in usage_map.items():
+ self._fake_quota_usages[key] = {'in_use': val}
+
+ def _fake_quota_usage_get_all_by_project(self, context, project_id):
+ return {'volumes': self._fake_quota_usages[project_id]}
+
class QuotaSetsControllerTest(QuotaSetsControllerTestBase):
def setUp(self):
super(QuotaSetsControllerTest, self).setUp()
- fixture = self.useFixture(config_fixture.Config(quota.CONF))
- fixture.config(quota_driver="cinder.quota.DbQuotaDriver")
- quotas.QUOTAS = quota.VolumeTypeQuotaEngine()
- self.controller = quotas.QuotaSetsController()
def test_defaults(self):
result = self.controller.defaults(self.req, 'foo')
'skip_validation': 'false'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'foo', body)
- body = {'quota_set': {'gigabytes': 1},
+ # Ensure that validation works even if some resources are valid
+ body = {'quota_set': {'gigabytes': 1, 'volumes': 10},
'skip_validation': 'false'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'foo', body)
self.req.params['fix_allocated_quotas'] = True
self.controller.validate_setup_for_nested_quota_use(self.req)
- def _fake_quota_usage_get_all_by_project(self, context, project_id):
- proj_vals = {
- self.A.id: {'in_use': 1},
- self.B.id: {'in_use': 1},
- self.D.id: {'in_use': 0},
- self.C.id: {'in_use': 3},
- self.E.id: {'in_use': 0},
- self.F.id: {'in_use': 0},
- self.G.id: {'in_use': 0},
- }
- return {'volumes': proj_vals[project_id]}
-
@mock.patch('cinder.db.quota_usage_get_all_by_project')
def test_validate_nested_quotas_in_use_vols(self, mock_usage):
+ self._create_fake_quota_usages(
+ {self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3,
+ self.E.id: 0, self.F.id: 0, self.G.id: 0})
mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
# Update the project A quota.
@mock.patch('cinder.db.quota_usage_get_all_by_project')
def test_validate_nested_quotas_quota_borked(self, mock_usage):
+ self._create_fake_quota_usages(
+ {self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3,
+ self.E.id: 0, self.F.id: 0, self.G.id: 0})
mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
# Update the project A quota.
self.controller.validate_setup_for_nested_quota_use,
self.req)
- def test_validate_nested_quota_negative_limits(self):
- # When we're validating, update the allocated values since we've
- # been updating child limits
- self.req.params['fix_allocated_quotas'] = True
- self.controller.validate_setup_for_nested_quota_use(self.req)
- # Update the project A quota.
+ @mock.patch('cinder.db.quota_usage_get_all_by_project')
+ def test_validate_nested_quota_negative_limits(self, mock_usage):
+ # TODO(mc_nair): this test case can be moved to Tempest once nested
+ # quota coverage is added
+ self._create_fake_quota_usages(
+ {self.A.id: 1, self.B.id: 3, self.C.id: 0, self.D.id: 2,
+ self.E.id: 2, self.F.id: 0, self.G.id: 0})
+ mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
+
+ # Setting E-F as children of D for this test case to exercise
+ # more complex nesting
+ self.D.subtree = {self.E.id: self.E.subtree}
+ self.E.parent_id = self.D.id
+ # Get B's subtree up to date with this change
+ self.B.subtree[self.D.id] = self.D.subtree
+
+ # Quota hierarchy now is
+ # / B - D - E - F
+ # A
+ # \ C
+ #
+ # G
+
self.req.environ['cinder.context'].project_id = self.A.id
- quota_limit = {'volumes': -1}
+ quota_limit = {'volumes': 10}
body = {'quota_set': quota_limit}
self.controller.update(self.req, self.A.id, body)
- quota_limit['volumes'] = 4
+ quota_limit['volumes'] = 1
+ self.controller.update(self.req, self.C.id, body)
+
+ quota_limit['volumes'] = -1
self.controller.update(self.req, self.B.id, body)
+ self.controller.update(self.req, self.D.id, body)
+ self.controller.update(self.req, self.F.id, body)
+ quota_limit['volumes'] = 5
+ self.controller.update(self.req, self.E.id, body)
- self.controller.validate_setup_for_nested_quota_use(self.req)
+ # Should fail because too much is allocated to children for A
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.validate_setup_for_nested_quota_use,
+ self.req)
+ # When root has -1 limit, children can allocate as much as they want
quota_limit['volumes'] = -1
- self.controller.update(self.req, self.F.id, body)
- # Should not work because can't have a child with negative limits
- self.assertRaises(
- webob.exc.HTTPBadRequest,
- self.controller.validate_setup_for_nested_quota_use,
- self.req)
+ self.controller.update(self.req, self.A.id, body)
+ self.req.params['fix_allocated_quotas'] = True
+ self.controller.validate_setup_for_nested_quota_use(self.req)
+
+ # Not unlimited, but keep the children's allocated values in bounds
+ quota_limit['volumes'] = 10
+ self.controller.update(self.req, self.A.id, body)
+ quota_limit['volumes'] = 3
+ self.controller.update(self.req, self.E.id, body)
+ self.req.params['fix_allocated_quotas'] = True
+ self.controller.validate_setup_for_nested_quota_use(self.req)
+ self.req.params['fix_allocated_quotas'] = False
+ self.controller.validate_setup_for_nested_quota_use(self.req)
class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
def setUp(self):
super(QuotaSetsControllerNestedQuotasTest, self).setUp()
- fixture = self.useFixture(config_fixture.Config(quota.CONF))
- fixture.config(quota_driver="cinder.quota.NestedDbQuotaDriver")
- quotas.QUOTAS = quota.VolumeTypeQuotaEngine()
- self.controller = quotas.QuotaSetsController()
+ driver = quota.NestedDbQuotaDriver()
+ patcher = mock.patch('cinder.quota.VolumeTypeQuotaEngine._driver',
+ driver)
+ patcher.start()
+ self.addCleanup(patcher.stop)
def test_subproject_defaults(self):
context = self.req.environ['cinder.context']
self.req, self.A.id)
def test_update_subproject_not_in_hierarchy(self):
-
# Create another project hierarchy
E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None)
F = self.FakeProject(id=uuid.uuid4().hex, parent_id=E.id)
# Update the quota of B to be equal to its parent quota
self.req.environ['cinder.context'].project_id = self.A.id
body = make_body(gigabytes=2000, snapshots=15,
- volumes=5, backups=5, tenant_id=None, is_child=True)
+ volumes=5, backups=5, tenant_id=None)
result = self.controller.update(self.req, self.B.id, body)
self.assertDictMatch(body, result)
# Try to update the quota of C, it will not be allowed, since the
# project A doesn't have free quota available.
self.req.environ['cinder.context'].project_id = self.A.id
body = make_body(gigabytes=2000, snapshots=15,
- volumes=5, backups=5, tenant_id=None, is_child=True)
+ volumes=5, backups=5, tenant_id=None)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, self.C.id, body)
# Successfully update the quota of D.
self.req.environ['cinder.context'].project_id = self.A.id
body = make_body(gigabytes=1000, snapshots=7,
- volumes=3, backups=3, tenant_id=None, is_child=True)
+ volumes=3, backups=3, tenant_id=None)
result = self.controller.update(self.req, self.D.id, body)
self.assertDictMatch(body, result)
# An admin of B can also update the quota of D, since D is its
# immediate child.
self.req.environ['cinder.context'].project_id = self.B.id
body = make_body(gigabytes=1500, snapshots=10,
- volumes=4, backups=4, tenant_id=None, is_child=True)
+ volumes=4, backups=4, tenant_id=None)
self.controller.update(self.req, self.D.id, body)
- def test_update_subproject_negative_limit(self):
- # Should not be able to set a negative limit for a child project (will
- # require further fixes to allow for this)
- self.req.environ['cinder.context'].project_id = self.A.id
- body = make_body(volumes=-1, is_child=True)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, self.req, self.B.id, body)
-
def test_update_subproject_repetitive(self):
# Update the project A volumes quota.
self.req.environ['cinder.context'].project_id = self.A.id
for i in range(0, 3):
self.req.environ['cinder.context'].project_id = self.A.id
body = make_body(gigabytes=2000, snapshots=15,
- volumes=10, backups=5, tenant_id=None,
- is_child=True)
+ volumes=10, backups=5, tenant_id=None)
result = self.controller.update(self.req, self.B.id, body)
self.assertDictMatch(body, result)
self.req.environ['cinder.context'].project_id = self.A.id
# Update the project B quota.
expected = make_body(gigabytes=1000, snapshots=10,
- volumes=5, backups=5, tenant_id=None,
- is_child=True)
+ volumes=5, backups=5, tenant_id=None)
result = self.controller.update(self.req, self.B.id, expected)
self.assertDictMatch(expected, result)
+ def _assert_quota_show(self, proj_id, resource, in_use=0, reserved=0,
+ allocated=0, limit=0):
+ self.req.params = {'usage': 'True'}
+ show_res = self.controller.show(self.req, proj_id)
+ expected = {'in_use': in_use, 'reserved': reserved,
+ 'allocated': allocated, 'limit': limit}
+ self.assertEqual(expected, show_res['quota_set'][resource])
+
+ def test_project_allocated_considered_on_reserve(self):
+ def _reserve(project_id):
+ quotas.QUOTAS._driver.reserve(
+ self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+ {'volumes': 1}, project_id=project_id)
+
+ # A's quota will default to 10 for volumes
+ quota = {'volumes': 5}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.B.id, body)
+ self._assert_quota_show(self.A.id, 'volumes', allocated=5, limit=10)
+ quota['volumes'] = 3
+ self.controller.update(self.req, self.C.id, body)
+ self._assert_quota_show(self.A.id, 'volumes', allocated=8, limit=10)
+ _reserve(self.A.id)
+ _reserve(self.A.id)
+ self.assertRaises(exception.OverQuota, _reserve, self.A.id)
+
+ def test_update_parent_project_lower_than_child(self):
+ # A's quota will be the default of 10
+ quota = {'volumes': 10}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.B.id, body)
+ quota['volumes'] = 9
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, self.req, self.A.id, body)
+
def test_subproject_delete(self):
self.req.environ['cinder.context'].project_id = self.A.id
- body = make_body(gigabytes=2000, snapshots=15,
- volumes=5, backups=5,
- backup_gigabytes=1000, tenant_id=None, is_child=True)
+ body = make_body(gigabytes=2000, snapshots=15, volumes=5, backups=5,
+ backup_gigabytes=1000, tenant_id=None)
result_update = self.controller.update(self.req, self.A.id, body)
self.assertDictMatch(body, result_update)
self.controller.update(self.req, self.A.id, body)
# Allocate some of that quota to a child project
- body = make_body(volumes=3, is_child=True)
+ body = make_body(volumes=3)
self.controller.update(self.req, self.B.id, body)
# Deleting 'A' should be disallowed since 'B' is using some of that
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
self.req, self.A.id)
+ def test_subproject_delete_with_child_updates_parent_allocated(self):
+ def _assert_delete_updates_allocated():
+ res = 'volumes'
+ self._assert_quota_show(self.A.id, res, allocated=2, limit=5)
+ self._assert_quota_show(self.B.id, res, allocated=2, limit=-1)
+ self.controller.delete(self.req, self.D.id)
+ self._assert_quota_show(self.A.id, res, allocated=0, limit=5)
+ self._assert_quota_show(self.B.id, res, allocated=0, limit=-1)
+
+ quota = {'volumes': 5}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.A.id, body)
+
+ # Allocate some of that quota to a child project using hard limit
+ quota['volumes'] = -1
+ self.controller.update(self.req, self.B.id, body)
+ quota['volumes'] = 2
+ self.controller.update(self.req, self.D.id, body)
+
+ _assert_delete_updates_allocated()
+
+ # Allocate some of that quota to a child project by using volumes
+ quota['volumes'] = -1
+ self.controller.update(self.req, self.D.id, body)
+ for x in range(2):
+ quotas.QUOTAS._driver.reserve(
+ self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+ {'volumes': 1}, project_id=self.D.id)
+
+ _assert_delete_updates_allocated()
+
+ def test_negative_child_limit_not_affecting_parents_free_quota(self):
+ quota = {'volumes': -1}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.C.id, body)
+ self.controller.update(self.req, self.B.id, body)
+
+ # Shouldn't be able to set greater than parent
+ quota['volumes'] = 11
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, self.B.id, body)
+
+ def test_child_neg_limit_set_grandkid_zero_limit(self):
+ cur_quota_a = self.controller.show(self.req, self.A.id)
+ self.assertEqual(10, cur_quota_a['quota_set']['volumes'])
+
+ quota = {'volumes': -1}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.B.id, body)
+
+ cur_quota_d = self.controller.show(self.req, self.D.id)
+ # Default child value is 0
+ self.assertEqual(0, cur_quota_d['quota_set']['volumes'])
+ # Should be able to set D explicitly to 0 since that's already the
+ # value
+ quota['volumes'] = 0
+ self.controller.update(self.req, self.D.id, body)
+
+ def test_grandkid_negative_one_limit_enforced(self):
+ quota = {'volumes': 2, 'gigabytes': 2}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.A.id, body)
+
+ quota['volumes'] = -1
+ quota['gigabytes'] = -1
+ self.controller.update(self.req, self.B.id, body)
+ self.controller.update(self.req, self.C.id, body)
+ self.controller.update(self.req, self.D.id, body)
+
+ def _reserve(project_id):
+ quotas.QUOTAS._driver.reserve(
+ self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+ {'volumes': 1, 'gigabytes': 1}, project_id=project_id)
+
+ _reserve(self.C.id)
+ _reserve(self.D.id)
+ self.assertRaises(exception.OverQuota, _reserve, self.B.id)
+ self.assertRaises(exception.OverQuota, _reserve, self.C.id)
+ self.assertRaises(exception.OverQuota, _reserve, self.D.id)
+
+ # Make sure the rollbacks correctly restored the allocated values
+ # for all resources
+ for res in quota.keys():
+ self._assert_quota_show(self.A.id, res, allocated=2, limit=2)
+ self._assert_quota_show(self.B.id, res, allocated=1, limit=-1)
+ self._assert_quota_show(self.C.id, res, reserved=1, limit=-1)
+ self._assert_quota_show(self.D.id, res, reserved=1, limit=-1)
+
+ def test_child_update_affects_allocated_and_rolls_back(self):
+ quota = {'gigabytes': -1, 'volumes': 3}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.A.id, body)
+ quota['volumes'] = -1
+ self.controller.update(self.req, self.B.id, body)
+ quota['volumes'] = 1
+ self.controller.update(self.req, self.C.id, body)
+
+ # Shouldn't be able to update to greater than the grandparent
+ quota['volumes'] = 3
+ quota['gigabytes'] = 1
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, self.req, self.D.id, body)
+ # Validate we haven't updated either parent's allocated value for
+ # any of the keys (even if some keys were valid)
+ self._assert_quota_show(self.A.id, 'volumes', allocated=1, limit=3)
+ self._assert_quota_show(self.A.id, 'gigabytes', limit=-1)
+ self._assert_quota_show(self.B.id, 'volumes', limit=-1)
+ self._assert_quota_show(self.B.id, 'gigabytes', limit=-1)
+
+ quota['volumes'] = 2
+ self.controller.update(self.req, self.D.id, body)
+ # Validate we have now updated the parent's and grandparent's
+ # allocated values
+ self.req.params = {'usage': 'True'}
+ self._assert_quota_show(self.A.id, 'volumes', allocated=3, limit=3)
+ self._assert_quota_show(self.A.id, 'gigabytes', allocated=1, limit=-1)
+ self._assert_quota_show(self.B.id, 'volumes', allocated=2, limit=-1)
+ self._assert_quota_show(self.B.id, 'gigabytes', allocated=1, limit=-1)
+
+ def test_negative_child_limit_reserve_and_rollback(self):
+ quota = {'volumes': 2, 'gigabytes': 2}
+ body = {'quota_set': quota}
+ self.controller.update(self.req, self.A.id, body)
+
+ quota['volumes'] = -1
+ quota['gigabytes'] = -1
+ self.controller.update(self.req, self.B.id, body)
+ self.controller.update(self.req, self.C.id, body)
+ self.controller.update(self.req, self.D.id, body)
+
+ res = quotas.QUOTAS._driver.reserve(
+ self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+ {'volumes': 2, 'gigabytes': 2}, project_id=self.D.id)
+
+ self.req.params = {'usage': 'True'}
+ quota_b = self.controller.show(self.req, self.B.id)
+ self.assertEqual(2, quota_b['quota_set']['volumes']['allocated'])
+ # A is the next project up the chain with a hard limit
+ quota_a = self.controller.show(self.req, self.A.id)
+ self.assertEqual(2, quota_a['quota_set']['volumes']['allocated'])
+ quota_d = self.controller.show(self.req, self.D.id)
+ self.assertEqual(2, quota_d['quota_set']['volumes']['reserved'])
+
+ quotas.QUOTAS.rollback(self.req.environ['cinder.context'], res,
+ self.D.id)
+ # After the rollback, A's allocated value should be restored
+ quota_a = self.controller.show(self.req, self.A.id)
+ self.assertEqual(0, quota_a['quota_set']['volumes']['allocated'])
+ quota_d = self.controller.show(self.req, self.D.id)
+ self.assertEqual(0, quota_d['quota_set']['volumes']['in_use'])
+
+ @mock.patch('cinder.db.sqlalchemy.api._get_quota_usages')
+ @mock.patch('cinder.db.quota_usage_get_all_by_project')
+ def test_nested_quota_set_negative_limit(self, mock_usage, mock_get_usage):
+ # TODO(mc_nair): this test should be moved to Tempest once nested quota
+ # coverage is added
+ fake_usages = {self.A.id: 1, self.B.id: 1, self.D.id: 2, self.C.id: 0}
+ self._create_fake_quota_usages(fake_usages)
+ mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
+
+ class FakeUsage(object):
+ def __init__(self, in_use, reserved):
+ self.in_use = in_use
+ self.reserved = reserved
+ self.until_refresh = None
+ self.total = self.reserved + self.in_use
+
+ def _fake__get_quota_usages(context, session, project_id):
+ if not project_id:
+ return {}
+ return {'volumes': FakeUsage(fake_usages[project_id], 0)}
+ mock_get_usage.side_effect = _fake__get_quota_usages
+
+ # Update the project A quota.
+ quota_limit = {'volumes': 7}
+ body = {'quota_set': quota_limit}
+ self.controller.update(self.req, self.A.id, body)
+
+ quota_limit['volumes'] = 4
+ self.controller.update(self.req, self.B.id, body)
+ quota_limit['volumes'] = -1
+ self.controller.update(self.req, self.D.id, body)
+
+ quota_limit['volumes'] = 1
+ self.controller.update(self.req, self.C.id, body)
+
+ self.req.params['fix_allocated_quotas'] = True
+ self.controller.validate_setup_for_nested_quota_use(self.req)
+
+ # Validate that the allocated values look right for each project
+ self.req.params = {'usage': 'True'}
+
+ res = 'volumes'
+ # A has given 4 vols to B and 1 vol to C (from limits)
+ self._assert_quota_show(self.A.id, res, allocated=5, in_use=1, limit=7)
+ self._assert_quota_show(self.B.id, res, allocated=2, in_use=1, limit=4)
+ self._assert_quota_show(self.D.id, res, in_use=2, limit=-1)
+ self._assert_quota_show(self.C.id, res, limit=1)
+
+ # Update B to a -1 limit, and make sure that A's allocated gets
+ # updated with B's + D's in_use values (one less than the current
+ # limit)
+ quota_limit['volumes'] = -1
+ self.controller.update(self.req, self.B.id, body)
+ self._assert_quota_show(self.A.id, res, allocated=4, in_use=1, limit=7)
+
+ quota_limit['volumes'] = 6
+ self.assertRaises(
+ webob.exc.HTTPBadRequest,
+ self.controller.update, self.req, self.B.id, body)
+
+ quota_limit['volumes'] = 5
+ self.controller.update(self.req, self.B.id, body)
+ self._assert_quota_show(self.A.id, res, allocated=6, in_use=1, limit=7)
+
class QuotaSerializerTest(test.TestCase):
self.assertIsInstance(services.c.active_backend_id.type,
self.VARCHAR_TYPE)
+ def _check_066(self, engine, data):
+ reservations = db_utils.get_table(engine, 'reservations')
+ self.assertIsInstance(reservations.c.allocated_id.type,
+ self.INTEGER_TYPE)
+
def test_walk_versions(self):
self.walk_versions(False, False)
# License for the specific language governing permissions and limitations
# under the License.
-
import datetime
import mock
class FakeProject(object):
def __init__(self, parent_id):
self.parent_id = parent_id
+ self.parents = {parent_id: None}
- def fake_get_project(project_id, subtree_as_ids=False):
+ def fake_get_project(project_id, subtree_as_ids=False,
+ parents_as_ids=False):
# Enable imitation of projects with and without parents
if project_id == self._child_proj_id:
return FakeProject('parent_id')
self.resources, self.project_tree)
self.proj_vals['D']['limit'] = 2
- def test_validate_nested_quotas_negative_child_limit(self):
- self.proj_vals['B']['limit'] = -1
- self.assertRaises(
- exception.InvalidNestedQuotaSetup,
- self.driver.validate_nested_setup,
- self.context, self.resources, self.project_tree)
-
def test_validate_nested_quotas_usage_over_limit(self):
-
self.proj_vals['D']['in_use'] = 5
self.assertRaises(exception.InvalidNestedQuotaSetup,
self.driver.validate_nested_setup,
self.context, self.resources, self.project_tree)
def test_validate_nested_quota_bad_allocated_quotas(self):
-
self.proj_vals['A']['alloc'] = 5
self.proj_vals['B']['alloc'] = 8
self.assertRaises(exception.InvalidNestedQuotaSetup,
self.driver.validate_nested_setup,
self.context, self.resources, self.project_tree)
+ def test_validate_nested_quota_negative_child_limits(self):
+ # Redefining the project limits with -1, doing it all in this test
+ # for readability
+ self.proj_vals = {
+ 'A': {'limit': 8, 'in_use': 1},
+ 'B': {'limit': -1, 'in_use': 3},
+ 'D': {'limit': 4, 'in_use': 0},
+ 'C': {'limit': 2, 'in_use': 2},
+ }
+
+ # A's child usage is 3 (from B) + 4 (from D) + 2 (from C) = 9
+ self.assertRaises(exception.InvalidNestedQuotaSetup,
+ self.driver.validate_nested_setup,
+ self.context, self.resources, self.project_tree)
+
+ self.proj_vals['D']['limit'] = 2
+ self.driver.validate_nested_setup(
+ self.context, self.resources, self.project_tree,
+ fix_allocated_quotas=True)
+
+ def test_get_cur_project_allocated(self):
+ # Redefining the project limits with -1, doing it all in this test
+ # for readability
+ self.proj_vals = {
+ # Allocated are here to simulate a bad existing value
+ 'A': {'limit': 8, 'in_use': 1, 'alloc': 6},
+ 'B': {'limit': -1, 'in_use': 3, 'alloc': 2},
+ 'D': {'limit': 1, 'in_use': 0},
+ 'C': {'limit': 2, 'in_use': 2},
+ }
+
+ self.driver._allocated = {}
+ allocated_a = self.driver._get_cur_project_allocated(
+ self.context, self.resources['volumes'],
+ self.project_tree)
+
+ # A's allocated will be:
+ # 2 (from C's limit) + 3 (from B's in-use) + 1 (from D's limit) = 6
+ self.assertEqual(6, allocated_a)
+
+ # B's allocated value should also be calculated and cached as part
+ # of A's calculation
+ self.assertEqual(1, self.driver._allocated['B']['volumes'])
+
class FakeSession(object):
def begin(self):
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
+ def query(self, *args, **kwargs):
+ pass
+
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
return quota_usage_ref
def fake_reservation_create(context, uuid, usage_id, project_id,
- resource, delta, expire, session=None):
+ resource, delta, expire, session=None,
+ allocated_id=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, resource, delta, expire,
- timeutils.utcnow(), timeutils.utcnow())
+ timeutils.utcnow(), timeutils.utcnow(), allocated_id)
self.reservations_created[resource] = reservation_ref
return reservation_ref
+ def fake_qagabp(context, project_id):
+ self.assertEqual('test_project', project_id)
+ return {'project_id': project_id}
+
+ self.stubs.Set(sqa_api, 'quota_allocated_get_all_by_project',
+ fake_qagabp)
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, resource,
- delta, expire, created_at, updated_at):
+ delta, expire, created_at, updated_at, alloc_id):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
+ reservation_ref.allocated_id = alloc_id
return reservation_ref
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, subtree_as_ids=True)
keystoneclient.projects.get.assert_called_once_with(
- self.context.project_id, subtree_as_ids=True)
+ self.context.project_id, parents_as_ids=False, subtree_as_ids=True)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('cinder.quota_utils._keystone_client')
usages = e.kwargs['usages']
def _consumed(name):
- return usages[name]['reserved'] + usages[name]['in_use']
+ usage = usages[name]
+ return usage['reserved'] + usage['in_use'] + usage.get(
+ 'allocated', 0)
def _get_over(name):
for over in overs:
over_name = _get_over('gigabytes')
exceeded_vol_limit_name = _get_over('volumes')
if over_name:
+ # TODO(mc_nair): improve error message for child -1 limit
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume (%(d_consumed)dG "
"of %(d_quota)dG already consumed)")
's_pid': context.project_id,
'd_consumed':
_consumed(exceeded_vol_limit_name)})
+ # TODO(mc_nair): improve error message for child -1 limit
raise exception.VolumeLimitExceeded(
allowed=quotas[exceeded_vol_limit_name],
name=exceeded_vol_limit_name)