'totalVolumesUsed': 'volumes',
'totalGigabytesUsed': 'gigabytes',
'totalSnapshotsUsed': 'snapshots',
+ 'totalBackupsUsed': 'backups',
+ 'totalBackupGigabytesUsed': 'backup_gigabytes'
}
used_limits = {}
"instances": ["maxTotalInstances"],
"cores": ["maxTotalCores"],
"gigabytes": ["maxTotalVolumeGigabytes"],
+ "backup_gigabytes": ["maxTotalBackupGigabytes"],
"volumes": ["maxTotalVolumes"],
"snapshots": ["maxTotalSnapshots"],
+ "backups": ["maxTotalBackups"],
"key_pairs": ["maxTotalKeypairs"],
"floating_ips": ["maxTotalFloatingIps"],
"metadata_items": ["maxServerMeta", "maxImageMeta"],
from cinder.db import base
from cinder import exception
from cinder.i18n import _
+from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
import cinder.policy
+from cinder import quota
from cinder import utils
import cinder.volume
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+QUOTAS = quota.QUOTAS
def check_policy(context, action):
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
- self.db.volume_update(context, volume_id, {'status': 'backing-up'})
+ # Do the quota reservation before setting volume status and backup status
+ try:
+ reserve_opts = {'backups': 1,
+ 'backup_gigabytes': volume['size']}
+ reservations = QUOTAS.reserve(context, **reserve_opts)
+ except exception.OverQuota as e:
+ overs = e.kwargs['overs']
+ usages = e.kwargs['usages']
+ quotas = e.kwargs['quotas']
+
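+ # 'usages' maps each resource name to its committed ('in_use') and
+ # pending ('reserved') amounts; counting both gives what is effectively
+ # consumed right now (illustrative shape, not from this change:
+ # {'backup_gigabytes': {'in_use': 10, 'reserved': 5}} counts as 15).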
+ def _consumed(resource_name):
+ return (usages[resource_name]['reserved'] +
+ usages[resource_name]['in_use'])
+
+ for over in overs:
+ if 'gigabytes' in over:
+ msg = _("Quota exceeded for %(s_pid)s, tried to create "
+ "%(s_size)sG backup (%(d_consumed)dG of "
+ "%(d_quota)dG already consumed)")
+ LOG.warn(msg % {'s_pid': context.project_id,
+ 's_size': volume['size'],
+ 'd_consumed': _consumed(over),
+ 'd_quota': quotas[over]})
+ raise exception.VolumeBackupSizeExceedsAvailableQuota(
+ requested=volume['size'],
+ consumed=_consumed('backup_gigabytes'),
+ quota=quotas['backup_gigabytes'])
+ elif 'backups' in over:
+ msg = _("Quota exceeded for %(s_pid)s, tried to create "
+ "backups (%(d_consumed)d backups "
+ "already consumed)")
+
+ LOG.warn(msg % {'s_pid': context.project_id,
+ 'd_consumed': _consumed(over)})
+ raise exception.BackupLimitExceeded(
+ allowed=quotas[over])
+ self.db.volume_update(context, volume_id, {'status': 'backing-up'})
options = {'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
'container': container,
'size': volume['size'],
'host': volume_host, }
-
- backup = self.db.backup_create(context, options)
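+ # Commit the reservation only once the backup record exists; if either
+ # step fails, destroy the record and roll back the reservation before
+ # re-raising the original exception.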
+ try:
+ backup = self.db.backup_create(context, options)
+ QUOTAS.commit(context, reservations)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ try:
+ self.db.backup_destroy(context, backup['id'])
+ finally:
+ QUOTAS.rollback(context, reservations)
#TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
+from cinder import quota
from cinder import utils
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
+QUOTAS = quota.QUOTAS
class BackupManager(manager.SchedulerDependentManager):
actual_status = backup['status']
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
- '%(expected_status)s but got %(actual_status)s.') % {
- 'expected_status': expected_status,
- 'actual_status': actual_status,
- }
- self.db.backup_update(context, backup_id, {'status': 'error',
- 'fail_reason': err})
+ '%(expected_status)s but got %(actual_status)s.') \
+ % {'expected_status': expected_status,
+ 'actual_status': actual_status}
+ self.db.backup_update(context, backup_id,
+ {'status': 'error', 'fail_reason': err})
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
- ' backup [%(backup_service)s].') % {
- 'configured_service': configured_service,
- 'backup_service': backup_service,
- }
+ ' backup [%(backup_service)s].')\
+ % {'configured_service': configured_service,
+ 'backup_service': backup_service}
self.db.backup_update(context, backup_id,
{'status': 'error'})
raise exception.InvalidBackup(reason=err)
'fail_reason':
unicode(err)})
+ # Get reservations
+ try:
+ reserve_opts = {
+ 'backups': -1,
+ 'backup_gigabytes': -backup['size'],
+ }
+ reservations = QUOTAS.reserve(context,
+ project_id=backup['project_id'],
+ **reserve_opts)
+ except Exception:
+ reservations = None
+ LOG.exception(_("Failed to update usages while deleting backup"))
+
context = context.elevated()
self.db.backup_destroy(context, backup_id)
+
+ # Commit the reservations
+ if reservations:
+ QUOTAS.commit(context, reservations,
+ project_id=backup['project_id'])
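+ # The negative deltas reserved above decrement the project's 'backups'
+ # and 'backup_gigabytes' usage when committed; if the reservation failed,
+ # the delete still proceeds and a later usage refresh through the
+ # _sync_backups/_sync_backup_gigabytes helpers can correct the counters.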
+
LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
def export_record(self, context, backup_id):
return {key: snapshots}
+def _sync_backups(context, project_id, session, volume_type_id=None,
+ volume_type_name=None):
+ (backups, _gigs) = _backup_data_get_for_project(
+ context, project_id, volume_type_id=volume_type_id, session=session)
+ key = 'backups'
+ return {key: backups}
+
+
def _sync_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(_junk, vol_gigs) = _volume_data_get_for_project(
key = 'consistencygroups'
return {key: groups}
+
+def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
+ volume_type_name=None):
+ key = 'backup_gigabytes'
+ (_junk, backup_gigs) = _backup_data_get_for_project(
+ context, project_id, volume_type_id=volume_type_id, session=session)
+ return {key: backup_gigs}
+
+
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
+ '_sync_backups': _sync_backups,
+ '_sync_backup_gigabytes': _sync_backup_gigabytes
}
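# ReservableResource entries in cinder.quota refer to these helpers by name
# (e.g. '_sync_backups'); quota_reserve() looks the name up in
# QUOTA_SYNC_FUNCTIONS whenever a project's usage records have to be
# created or refreshed.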
return (result[0] or 0, result[1] or 0)
+@require_admin_context
+def _backup_data_get_for_project(context, project_id, volume_type_id=None,
+ session=None):
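+ """Return (backup count, total backup size in GB) for a project."""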
+ query = model_query(context,
+ func.count(models.Backup.id),
+ func.sum(models.Backup.size),
+ read_deleted="no",
+ session=session).\
+ filter_by(project_id=project_id)
+
+ if volume_type_id:
+ query = query.filter_by(volume_type_id=volume_type_id)
+
+ result = query.first()
+
+ # NOTE(vish): convert None to 0
+ return (result[0] or 0, result[1] or 0)
+
+
@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None):
return _volume_data_get_for_project(context, project_id, volume_type_id)
"%(consumed)sG has been consumed.")
+class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
+ message = _("Requested backup exceeds allowed Backup Gigabytes "
+ "quota. Requested %(requested)sG, quota is %(quota)sG and "
+ "%(consumed)sG has been consumed.")
+
+
class VolumeLimitExceeded(QuotaError):
message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
+class BackupLimitExceeded(QuotaError):
+ message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
+
+
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")
default=1000,
help='Total amount of storage, in gigabytes, allowed '
'for volumes and snapshots per project'),
+ cfg.IntOpt('quota_backups',
+ default=10,
+ help='Number of volume backups allowed per project'),
+ cfg.IntOpt('quota_backup_gigabytes',
+ default=1000,
+ help='Total amount of storage, in gigabytes, allowed '
+ 'for backups per project'),
cfg.IntOpt('reservation_expire',
default=86400,
help='Number of seconds until a reservation expires'),
default_quotas = {}
if CONF.use_default_quota_class:
default_quotas = db.quota_class_get_default(context)
+
for resource in resources.values():
if resource.name not in default_quotas:
LOG.deprecated(_("Default quota for resource: %(res)s is set "
# Global quotas.
argses = [('volumes', '_sync_volumes', 'quota_volumes'),
('snapshots', '_sync_snapshots', 'quota_snapshots'),
- ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ]
+ ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'),
+ ('backups', '_sync_backups', 'quota_backups'),
+ ('backup_gigabytes', '_sync_backup_gigabytes',
+ 'quota_backup_gigabytes')]
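# Each tuple is (resource name, sync helper name in
# cinder.db.sqlalchemy.api, config option supplying the default limit), so
# 'backups' defaults to CONF.quota_backups and 'backup_gigabytes' to
# CONF.quota_backup_gigabytes.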
for args in argses:
resource = ReservableResource(*args)
result[resource.name] = resource
def make_body(root=True, gigabytes=1000, snapshots=10,
- volumes=10, tenant_id='foo'):
+ volumes=10, backups=10, backup_gigabytes=1000,
+ tenant_id='foo'):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
- 'volumes': volumes}
+ 'volumes': volumes,
+ 'backups': backups,
+ 'backup_gigabytes': backup_gigabytes}
# need to consider preexisting volume types as well
volume_types = db.volume_type_get_all(context.get_admin_context())
for volume_type in volume_types:
def test_update(self):
body = make_body(gigabytes=2000, snapshots=15,
- volumes=5, tenant_id=None)
+ volumes=5, backups=5, tenant_id=None)
result = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result, body)
self.assertDictMatch(result_show, make_body())
body = make_body(gigabytes=2000, snapshots=15,
- volumes=5, tenant_id=None)
+ volumes=5, backups=5,
+ backup_gigabytes=1000, tenant_id=None)
result_update = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result_update, body)
def make_body(root=True, gigabytes=1000, snapshots=10,
- volumes=10, volume_types_faked=None,
+ volumes=10, backups=10,
+ backup_gigabytes=1000,
+ volume_types_faked=None,
tenant_id='foo'):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
- 'volumes': volumes}
+ 'volumes': volumes,
+ 'backups': backups,
+ 'backup_gigabytes': backup_gigabytes}
if not volume_types_faked:
volume_types_faked = {'fake_type': None}
for volume_type in volume_types_faked:
import mock
from oslo.config import cfg
+from cinder import backup
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqa_api
self.flags(quota_volumes=2,
quota_snapshots=2,
- quota_gigabytes=20)
+ quota_gigabytes=20,
+ quota_backups=2,
+ quota_backup_gigabytes=20)
self.user_id = 'admin'
self.project_id = 'admin'
snapshot['status'] = 'available'
return db.snapshot_create(self.context, snapshot)
+ def _create_backup(self, volume):
+ backup = {}
+ backup['user_id'] = self.user_id
+ backup['project_id'] = self.project_id
+ backup['volume_id'] = volume['id']
+ backup['volume_size'] = volume['size']
+ backup['status'] = 'available'
+ return db.backup_create(self.context, backup)
+
def test_too_many_volumes(self):
volume_ids = []
for i in range(CONF.quota_volumes):
db.snapshot_destroy(self.context, snap_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
+ def test_too_many_backups(self):
+ resource = 'backups'
+ db.quota_class_create(self.context, 'default', resource, 1)
+ flag_args = {
+ 'quota_backups': 2000,
+ 'quota_backup_gigabytes': 2000
+ }
+ self.flags(**flag_args)
+ vol_ref = self._create_volume()
+ backup_ref = self._create_backup(vol_ref)
+ with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
+ mock__is_backup_service_enabled:
+ mock__is_backup_service_enabled.return_value = True
+ self.assertRaises(exception.BackupLimitExceeded,
+ backup.API().create,
+ self.context,
+ 'name',
+ 'description',
+ vol_ref['id'],
+ 'container')
+ db.backup_destroy(self.context, backup_ref['id'])
+ db.volume_destroy(self.context, vol_ref['id'])
+
def test_too_many_gigabytes(self):
volume_ids = []
vol_ref = self._create_volume(size=20)
db.snapshot_destroy(self.context, snap_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
+ def test_too_many_combined_backup_gigabytes(self):
+ vol_ref = self._create_volume(size=10000)
+ backup_ref = self._create_backup(vol_ref)
+ with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
+ mock__is_backup_service_enabled:
+ mock__is_backup_service_enabled.return_value = True
+ self.assertRaises(
+ exception.VolumeBackupSizeExceedsAvailableQuota,
+ backup.API().create,
+ context=self.context,
+ name='name',
+ description='description',
+ volume_id=vol_ref['id'],
+ container='container')
+ db.backup_destroy(self.context, backup_ref['id'])
+ db.volume_destroy(self.context, vol_ref['id'])
+
def test_no_snapshot_gb_quota_flag(self):
self.flags(quota_volumes=2,
quota_snapshots=2,
db.volume_destroy(self.context, vol_ref['id'])
db.volume_destroy(self.context, vol_ref2['id'])
+ def test_backup_gb_quota_flag(self):
+ self.flags(quota_volumes=2,
+ quota_snapshots=2,
+ quota_backups=2,
+ quota_gigabytes=20
+ )
+ vol_ref = self._create_volume(size=10)
+ backup_ref = self._create_backup(vol_ref)
+ with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
+ mock__is_backup_service_enabled:
+ mock__is_backup_service_enabled.return_value = True
+ backup_ref2 = backup.API().create(self.context,
+ 'name',
+ 'description',
+ vol_ref['id'],
+ 'container')
+
+ # Make sure the backup volume_size isn't included in usage.
+ vol_ref2 = volume.API().create(self.context, 10, '', '')
+ usages = db.quota_usage_get_all_by_project(self.context,
+ self.project_id)
+ self.assertEqual(usages['gigabytes']['in_use'], 20)
+ self.assertEqual(usages['gigabytes']['reserved'], 0)
+
+ db.backup_destroy(self.context, backup_ref['id'])
+ db.backup_destroy(self.context, backup_ref2['id'])
+ db.volume_destroy(self.context, vol_ref['id'])
+ db.volume_destroy(self.context, vol_ref2['id'])
+
def test_too_many_gigabytes_of_type(self):
resource = 'gigabytes_%s' % self.volume_type_name
db.quota_class_create(self.context, 'default', resource, 10)
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(engine.resource_names,
- ['gigabytes', 'snapshots', 'volumes'])
+ ['backup_gigabytes', 'backups',
+ 'gigabytes', 'snapshots', 'volumes'])
def test_volume_type_resources(self):
ctx = context.RequestContext('admin', 'admin', is_admin=True)
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(engine.resource_names,
- ['gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
+ ['backup_gigabytes', 'backups',
+ 'gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
'snapshots', 'snapshots_type1', 'snapshots_type_2',
'volumes', 'volumes_type1', 'volumes_type_2'])
db.volume_type_destroy(ctx, vtype['id'])
self.flags(quota_volumes=10,
quota_snapshots=10,
quota_gigabytes=1000,
+ quota_backups=10,
+ quota_backup_gigabytes=1000,
reservation_expire=86400,
until_refresh=0,
max_age=0,
dict(
volumes=10,
snapshots=10,
- gigabytes=1000, ))
+ gigabytes=1000,
+ backups=10,
+ backup_gigabytes=1000))
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
self.calls.append('quota_class_get_default')
return dict(volumes=10,
snapshots=10,
- gigabytes=1000,)
+ gigabytes=1000,
+ backups=10,
+ backup_gigabytes=1000
+ )
self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
def _stub_volume_type_get_all(self):
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
- return dict(gigabytes=500, volumes=10, snapshots=10, )
+ return dict(gigabytes=500, volumes=10, snapshots=10, backups=10,
+ backup_gigabytes=500)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(volumes=10,
gigabytes=500,
- snapshots=10))
+ snapshots=10,
+ backups=10,
+ backup_gigabytes=500))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(volumes=10,
gigabytes=500,
- snapshots=10))
+ snapshots=10,
+ backups=10,
+ backup_gigabytes=500))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
- return dict(volumes=10, gigabytes=50, reserved=0, snapshots=10)
+ return dict(volumes=10, gigabytes=50, reserved=0,
+ snapshots=10, backups=10,
+ backup_gigabytes=50)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(volumes=dict(in_use=2, reserved=0),
snapshots=dict(in_use=2, reserved=0),
- gigabytes=dict(in_use=10, reserved=0), )
+ gigabytes=dict(in_use=10, reserved=0),
+ backups=dict(in_use=2, reserved=0),
+ backup_gigabytes=dict(in_use=10, reserved=0)
+ )
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
- reserved=0, ), ))
+ reserved=0, ),
+ backups=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ backup_gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ),
+ ))
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
- reserved=0, ), ))
+ reserved=0, ),
+ backups=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ backup_gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ),
+ ))
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
- reserved=0, ), ))
+ reserved=0, ),
+ backups=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ backup_gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ),
+ ))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
'quota_class_get_all_by_name',
'quota_class_get_default', ])
self.assertEqual(result,
- dict(gigabytes=dict(limit=50,
+ dict(backups=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ backup_gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ),
+ gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
snapshots=dict(limit=10,
reserved=0, ),
volumes=dict(limit=10,
in_use=2,
- reserved=0, ), ))
+ reserved=0, ),
+ ))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
'quota_class_get_default', ])
self.assertEqual(result, dict(volumes=dict(limit=10, ),
snapshots=dict(limit=10, ),
- gigabytes=dict(limit=50, ), ))
+ backups=dict(limit=10, ),
+ gigabytes=dict(limit=50, ),
+ backup_gigabytes=dict(limit=50, ),))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
# and snapshots per project (integer value)
#quota_gigabytes=1000
+# Number of volume backups allowed per project (integer value)
+#quota_backups=10
+
+# Total amount of storage, in gigabytes, allowed for backups
+# per project (integer value)
+#quota_backup_gigabytes=1000
+
# Number of seconds until a reservation expires (integer
# value)
#reservation_expire=86400