This patch adds a VersionedObjects abstraction layer to volume backups.
The object derives from CinderObjectDictCompat, so it supports both
object (obj.prop) and dict (obj['prop']) syntax for accessing
properties. The complete move to object notation will be made in a
follow-up cleanup patch.
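A rough illustration of the dict-compat behaviour (names as in the
patch below); both spellings read the same field:

    backup = objects.Backup.get_by_id(context, backup_id)
    assert backup.status == backup['status']  # same field, two spellings
    backup.status = 'available'               # object-style write
    backup.save()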
Co-Authored-By: Grzegorz Grasza <grzegorz.grasza@intel.com>
Change-Id: Icff37261b367463b71a1268be16f9c97f595bf0c
Partial-Implements: blueprint cinder-objects
backups = self.backup_api.get_all(context, search_opts=filters)
backup_count = len(backups)
- limited_list = common.limited(backups, req)
+ limited_list = common.limited(backups.objects, req)
req.cache_db_backups(limited_list)
if is_detail:
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LI, _LW
+from cinder import objects
import cinder.policy
from cinder import quota
from cinder import utils
def get(self, context, backup_id):
check_policy(context, 'get')
- rv = self.db.backup_get(context, backup_id)
- return dict(rv)
+ return objects.Backup.get_by_id(context, backup_id)
def delete(self, context, backup_id):
"""Make the RPC call to delete a volume backup."""
msg = _('Incremental backups exist for this backup.')
raise exception.InvalidBackup(reason=msg)
- self.db.backup_update(context, backup_id, {'status': 'deleting'})
- self.backup_rpcapi.delete_backup(context,
- backup['host'],
- backup['id'])
+ backup.status = 'deleting'
+ backup.save()
+ self.backup_rpcapi.delete_backup(context, backup)
def get_all(self, context, search_opts=None):
if search_opts is None:
search_opts = {}
check_policy(context, 'get_all')
+
if context.is_admin:
- backups = self.db.backup_get_all(context, filters=search_opts)
+ backups = objects.BackupList.get_all(context, filters=search_opts)
else:
- backups = self.db.backup_get_all_by_project(context,
- context.project_id,
- filters=search_opts)
+ backups = objects.BackupList.get_all_by_project(
+ context,
+ context.project_id,
+ filters=search_opts
+ )
return backups
# backup to do an incremental backup.
latest_backup = None
if incremental:
- backups = self.db.backup_get_all_by_volume(context.elevated(),
- volume_id)
- if backups:
- latest_backup = max(backups, key=lambda x: x['created_at'])
+ backups = objects.BackupList.get_all_by_volume(context.elevated(),
+ volume_id)
+ if backups.objects:
+ latest_backup = max(backups.objects,
+ key=lambda x: x['created_at'])
else:
msg = _('No backups available to do an incremental backup.')
raise exception.InvalidBackup(reason=msg)
parent_id = None
if latest_backup:
- parent_id = latest_backup['id']
+ parent_id = latest_backup.id
if latest_backup['status'] != "available":
msg = _('The parent backup must be available for '
'incremental backup.')
raise exception.InvalidBackup(reason=msg)
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
- options = {'user_id': context.user_id,
- 'project_id': context.project_id,
- 'display_name': name,
- 'display_description': description,
- 'volume_id': volume_id,
- 'status': 'creating',
- 'container': container,
- 'parent_id': parent_id,
- 'size': volume['size'],
- 'host': volume_host, }
try:
- backup = self.db.backup_create(context, options)
+ kwargs = {
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'display_name': name,
+ 'display_description': description,
+ 'volume_id': volume_id,
+ 'status': 'creating',
+ 'container': container,
+ 'parent_id': parent_id,
+ 'size': volume['size'],
+ 'host': volume_host,
+ }
+ backup = objects.Backup(context=context, **kwargs)
+ backup.create()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
- self.db.backup_destroy(context, backup['id'])
+ backup.destroy()
finally:
QUOTAS.rollback(context, reservations)
# TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
# better load balancing and isolation of services
- self.backup_rpcapi.create_backup(context,
- backup['host'],
- backup['id'],
- volume_id)
+ self.backup_rpcapi.create_backup(context, backup)
return backup
# Setting the status here rather than setting at start and unrolling
# for each error condition, it should be a very small window
- self.db.backup_update(context, backup_id, {'status': 'restoring'})
+ backup.status = 'restoring'
+ backup.save()
+ volume_host = volume_utils.extract_host(volume['host'], 'host')
self.db.volume_update(context, volume_id, {'status':
'restoring-backup'})
- volume_host = volume_utils.extract_host(volume['host'], 'host')
- self.backup_rpcapi.restore_backup(context,
- volume_host,
- backup['id'],
+ self.backup_rpcapi.restore_backup(context, volume_host, backup,
volume_id)
d = {'backup_id': backup_id,
# get backup info
backup = self.get(context, backup_id)
# send to manager to do reset operation
- self.backup_rpcapi.reset_status(ctxt=context, host=backup['host'],
- backup_id=backup_id, status=status)
+ self.backup_rpcapi.reset_status(ctxt=context, backup=backup,
+ status=status)
def export_record(self, context, backup_id):
"""Make the RPC call to export a volume backup.
{'ctx': context,
'host': backup['host'],
'id': backup['id']})
- export_data = self.backup_rpcapi.export_record(context,
- backup['host'],
- backup['id'])
+ export_data = self.backup_rpcapi.export_record(context, backup)
return export_data
if len(hosts) == 0:
raise exception.ServiceNotFound(service_id=backup_service)
- options = {'user_id': context.user_id,
- 'project_id': context.project_id,
- 'volume_id': '0000-0000-0000-0000',
- 'status': 'creating', }
- backup = self.db.backup_create(context, options)
+ kwargs = {
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'volume_id': '0000-0000-0000-0000',
+ 'status': 'creating',
+ }
+ backup = objects.Backup(context=context, **kwargs)
+ backup.create()
first_host = hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
- backup['id'],
+ backup,
backup_service,
backup_url,
hosts)
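Sketch of the api-layer pattern these hunks establish (all names as in
the patch): the API builds a Backup object, persists it, and hands the
object itself, not an id, to the RPC layer:

    backup = objects.Backup(context=context, **kwargs)
    backup.create()   # inserts the row, populates generated fields
    self.backup_rpcapi.create_backup(context, backup)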
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
+from cinder import objects
from cinder.openstack.common import loopingcall
from cinder.volume import utils as volume_utils
return
def _create_container(self, context, backup):
- backup_id = backup['id']
- backup['container'] = self.update_container_name(backup,
- backup['container'])
- container = backup['container']
+ backup.container = self.update_container_name(backup, backup.container)
LOG.debug('_create_container started, container: %(container)s,'
'backup: %(backup_id)s.',
- {'container': container, 'backup_id': backup_id})
- if container is None:
- container = self.backup_default_container
- self.db.backup_update(context, backup_id, {'container': container})
- self.put_container(container)
- return container
+ {'container': backup.container, 'backup_id': backup.id})
+ if backup.container is None:
+ backup.container = self.backup_default_container
+ backup.save()
+ self.put_container(backup.container)
+ return backup.container
def _generate_object_names(self, backup):
prefix = backup['service_metadata']
def _prepare_backup(self, backup):
"""Prepare the backup process and return the backup metadata."""
- backup_id = backup['id']
- volume_id = backup['volume_id']
- volume = self.db.volume_get(self.context, volume_id)
+ volume = self.db.volume_get(self.context, backup.volume_id)
if volume['size'] <= 0:
err = _('volume size %d is invalid.') % volume['size']
container = self._create_container(self.context, backup)
object_prefix = self._generate_object_name_prefix(backup)
- backup['service_metadata'] = object_prefix
- self.db.backup_update(self.context, backup_id, {'service_metadata':
- object_prefix})
+ backup.service_metadata = object_prefix
+ backup.save()
+
volume_size_bytes = volume['size'] * units.Gi
availability_zone = self.az
LOG.debug('starting backup of volume: %(volume_id)s,'
' prefix %(object_prefix)s, availability zone:'
' %(availability_zone)s',
{
- 'volume_id': volume_id,
+ 'volume_id': backup.volume_id,
'volume_size_bytes': volume_size_bytes,
'object_prefix': object_prefix,
'availability_zone': availability_zone,
sha256_list = object_sha256['sha256s']
extra_metadata = object_meta.get('extra_metadata')
self._write_sha256file(backup,
- backup['volume_id'],
+ backup.volume_id,
container,
sha256_list)
self._write_metadata(backup,
- backup['volume_id'],
+ backup.volume_id,
container,
object_list,
volume_meta,
extra_metadata)
- self.db.backup_update(self.context, backup['id'],
- {'object_count': object_id})
+ backup.object_count = object_id
+ backup.save()
LOG.debug('backup %s finished.', backup['id'])
def _backup_metadata(self, backup, object_meta):
# is given.
parent_backup_shafile = None
parent_backup = None
- if backup['parent_id']:
- parent_backup = self.db.backup_get(self.context,
- backup['parent_id'])
+ if backup.parent_id:
+ parent_backup = objects.Backup.get_by_id(self.context,
+ backup.parent_id)
parent_backup_shafile = self._read_sha256file(parent_backup)
parent_backup_shalist = parent_backup_shafile['sha256s']
if (parent_backup_shafile['chunk_size'] !=
raise exception.InvalidBackup(reason=err)
# If the volume size increased since the last backup, fail
# the incremental backup and ask user to do a full backup.
- if backup['size'] > parent_backup['size']:
+ if backup.size > parent_backup.size:
err = _('Volume size increased since the last '
'backup. Do a full backup.')
raise exception.InvalidBackup(reason=err)
backup_list = []
backup_list.append(backup)
current_backup = backup
- while current_backup['parent_id']:
- prev_backup = (self.db.backup_get(
- self.context, current_backup['parent_id']))
+ while current_backup.parent_id:
+ prev_backup = objects.Backup.get_by_id(self.context,
+ current_backup.parent_id)
backup_list.append(prev_backup)
current_backup = prev_backup
self._full_backup(backup_id, volume_id, volume_file,
volume_name, length)
- self.db.backup_update(self.context, backup_id,
- {'container': self._ceph_backup_pool})
+ backup.container = self._ceph_backup_pool
+ backup.save()
if backup_metadata:
try:
def _get_backup_metadata(backup, operation):
"""Return metadata persisted with backup object."""
- svc_metadata = backup['service_metadata']
try:
- svc_dict = json.loads(svc_metadata)
+ svc_dict = json.loads(backup.service_metadata)
backup_path = svc_dict.get('backup_path')
backup_mode = svc_dict.get('backup_mode')
except TypeError:
"not yet support this feature.")
raise exception.InvalidBackup(reason=msg)
- backup_id = backup['id']
- volume_id = backup['volume_id']
volume_path, backup_mode = _get_volume_realpath(volume_file,
- volume_id)
+ backup.volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.',
- {'volume_id': volume_id,
+ {'volume_id': backup.volume_id,
'volume_path': volume_path,
'mode': backup_mode})
- backup_path = _create_unique_device_link(backup_id,
+ backup_path = _create_unique_device_link(backup.id,
volume_path,
- volume_id,
+ backup.volume_id,
backup_mode)
service_metadata = {'backup_mode': backup_mode,
'backup_path': backup_path}
- self.db.backup_update(self.context,
- backup_id,
- {'service_metadata':
- json.dumps(service_metadata)})
+ backup.service_metadata = json.dumps(service_metadata)
+ backup.save()
try:
- self._do_backup(backup_path, volume_id, backup_mode)
+ self._do_backup(backup_path, backup.volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
- % {'vol_id': volume_id,
+ % {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
- % {'vol_id': volume_id,
+ % {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
raise exception.InvalidBackup(reason=err)
finally:
- _cleanup_device_hardlink(backup_path, volume_path, volume_id)
+ _cleanup_device_hardlink(backup_path, volume_path,
+ backup.volume_id)
- LOG.debug('Backup %s finished.', backup_id)
+ LOG.debug('Backup %s finished.', backup.id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
:raises InvalidBackup
"""
- backup_id = backup['id']
-
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
'backup: %(backup_id)s, '
'mode: %(mode)s.',
{'volume_id': volume_id,
- 'backup_id': backup_id,
+ 'backup_id': backup.id,
'mode': backup_mode})
# volume_path is the path to restore into. This may
volume_path, unused = _get_volume_realpath(volume_file,
volume_id)
- restore_path = _create_unique_device_link(backup_id,
+ restore_path = _create_unique_device_link(backup.id,
volume_path,
volume_id,
backup_mode)
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
- {'backup_id': backup_id,
+ {'backup_id': backup.id,
'volume_id': volume_id})
def delete(self, backup):
delete_attrs = {'Total number of objects deleted': '1'}
delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
- volume_id = backup['volume_id']
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
- {'backup': backup['id'],
+ {'backup': backup.id,
'mode': backup_mode})
try:
except processutils.ProcessExecutionError as exc:
err = (_('delete: %(vol_id)s failed to run dsmc with '
'stdout: %(out)s\n stderr: %(err)s')
- % {'vol_id': volume_id,
+ % {'vol_id': backup.volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
err = (_('delete: %(vol_id)s failed to run dsmc '
'due to invalid arguments with '
'stdout: %(out)s\n stderr: %(err)s')
- % {'vol_id': volume_id,
+ % {'vol_id': backup.volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
# object can be removed.
LOG.error(_LE('delete: %(vol_id)s failed with '
'stdout: %(out)s\n stderr: %(err)s'),
- {'vol_id': volume_id,
+ {'vol_id': backup.volume_id,
'out': out,
'err': err})
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
+from cinder import objects
from cinder import quota
from cinder import rpc
from cinder import utils
driver.set_initialized()
+ def _update_backup_error(self, backup, context, err):
+ backup.status = 'error'
+ backup.fail_reason = err
+ backup.save()
+
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
- backups = self.db.backup_get_all_by_host(ctxt, self.host)
+ backups = objects.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
LOG.info(_LI('Resetting backup %s to error (was creating).'),
backup['id'])
err = 'incomplete backup reset on manager restart'
- self.db.backup_update(ctxt, backup['id'], {'status': 'error',
- 'fail_reason': err})
+ self._update_backup_error(backup, ctxt, err)
if backup['status'] == 'restoring':
LOG.info(_LI('Resetting backup %s to '
'available (was restoring).'),
backup['id'])
- self.db.backup_update(ctxt, backup['id'],
- {'status': 'available'})
+ backup.status = 'available'
+ backup.save()
if backup['status'] == 'deleting':
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
- self.delete_backup(ctxt, backup['id'])
+ self.delete_backup(ctxt, backup)
- def create_backup(self, context, backup_id):
+ def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
- backup = self.db.backup_get(context, backup_id)
- volume_id = backup['volume_id']
+ volume_id = backup.volume_id
volume = self.db.volume_get(context, volume_id)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
- {'backup_id': backup_id, 'volume_id': volume_id})
+ {'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
- self.db.backup_update(context, backup_id, {'host': self.host,
- 'service':
- self.driver_name})
+ backup.host = self.host
+ backup.service = self.driver_name
+ backup.save()
expected_status = 'backing-up'
actual_status = volume['status']
'expected_status': expected_status,
'actual_status': actual_status,
}
- self.db.backup_update(context, backup_id, {'status': 'error',
- 'fail_reason': err})
+ self._update_backup_error(backup, context, err)
raise exception.InvalidVolume(reason=err)
expected_status = 'creating'
- actual_status = backup['status']
+ actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.volume_update(context, volume_id, {'status': 'available'})
- self.db.backup_update(context, backup_id, {'status': 'error',
- 'fail_reason': err})
+ self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'available'})
- self.db.backup_update(context, backup_id,
- {'status': 'error',
- 'fail_reason': six.text_type(err)})
+ self._update_backup_error(backup, context, six.text_type(err))
self.db.volume_update(context, volume_id, {'status': 'available'})
- backup = self.db.backup_update(context, backup_id,
- {'status': 'available',
- 'size': volume['size'],
- 'availability_zone': self.az})
- LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)
+ backup.status = 'available'
+ backup.size = volume['size']
+ backup.availability_zone = self.az
+ backup.save()
+ LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
- def restore_backup(self, context, backup_id, volume_id):
+ def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
- {'backup_id': backup_id, 'volume_id': volume_id})
+ {'backup_id': backup.id, 'volume_id': volume_id})
- backup = self.db.backup_get(context, backup_id)
volume = self.db.volume_get(context, volume_id)
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
self._notify_about_backup_usage(context, backup, "restore.start")
- self.db.backup_update(context, backup_id, {'host': self.host})
+ backup.host = self.host
+ backup.save()
expected_status = 'restoring-backup'
actual_status = volume['status']
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
- self.db.backup_update(context, backup_id, {'status': 'available'})
+ backup.status = 'available'
+ backup.save()
raise exception.InvalidVolume(reason=err)
expected_status = 'restoring'
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
- self.db.backup_update(context, backup_id, {'status': 'error',
- 'fail_reason': err})
+ self._update_backup_error(backup, context, err)
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
'configured_service': configured_service,
'backup_service': backup_service,
}
- self.db.backup_update(context, backup_id, {'status': 'available'})
+ backup.status = 'available'
+ backup.save()
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
- self.db.backup_update(context, backup_id,
- {'status': 'available'})
+ backup.status = 'available'
+ backup.save()
self.db.volume_update(context, volume_id, {'status': 'available'})
- backup = self.db.backup_update(context, backup_id,
- {'status': 'available'})
+ backup.status = 'available'
+ backup.save()
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.'),
- {'backup_id': backup_id, 'volume_id': volume_id})
+ {'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
- def delete_backup(self, context, backup_id):
+ def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
+ LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
+
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized as err:
with excutils.save_and_reraise_exception():
- self.db.backup_update(context, backup_id,
- {'status': 'error',
- 'fail_reason':
- six.text_type(err)})
+ self._update_backup_error(backup, context, six.text_type(err))
- LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
- backup = self.db.backup_get(context, backup_id)
self._notify_about_backup_usage(context, backup, "delete.start")
- self.db.backup_update(context, backup_id, {'host': self.host})
+ backup.host = self.host
+ backup.save()
expected_status = 'deleting'
- actual_status = backup['status']
+ actual_status = backup.status
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
- self.db.backup_update(context, backup_id,
- {'status': 'error', 'fail_reason': err})
+ self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
- self.db.backup_update(context, backup_id,
- {'status': 'error'})
+ self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
- self.db.backup_update(context, backup_id,
- {'status': 'error',
- 'fail_reason':
- six.text_type(err)})
+ self._update_backup_error(backup, context,
+ six.text_type(err))
# Get reservations
try:
reserve_opts = {
'backups': -1,
- 'backup_gigabytes': -backup['size'],
+ 'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
- project_id=backup['project_id'],
+ project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
context = context.elevated()
- self.db.backup_destroy(context, backup_id)
+ backup.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
- project_id=backup['project_id'])
+ project_id=backup.project_id)
- LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
+ LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
extra_usage_info=extra_usage_info,
host=self.host)
- def export_record(self, context, backup_id):
+ def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it could be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
- :param backup_id: backup id to export
+ :param backup: backup object to export
:returns: backup_record - a description of how to import the backup
:returns: contains 'backup_url' - how to import the backup, and
:returns: 'backup_service' describing the needed driver.
:raises: InvalidBackup
"""
- LOG.info(_LI('Export record started, backup: %s.'), backup_id)
-
- backup = self.db.backup_get(context, backup_id)
+ LOG.info(_LI('Export record started, backup: %s.'), backup.id)
expected_status = 'available'
- actual_status = backup['status']
+ actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
raise exception.InvalidBackup(reason=err)
backup_record = {}
- backup_record['backup_service'] = backup['service']
- backup_service = self._map_service_to_driver(backup['service'])
+ backup_record['backup_service'] = backup.service
+ backup_service = self._map_service_to_driver(backup.service)
configured_service = self.driver_name
if backup_service != configured_service:
err = (_('Export record aborted, the backup service currently'
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
- LOG.info(_LI('Export record finished, backup %s exported.'), backup_id)
+ LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
return backup_record
def import_record(self,
context,
- backup_id,
+ backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
- :param backup_id: The new backup id for the import
+ :param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
- backup_id,
+ backup,
backup_service,
backup_url,
backup_hosts)
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s') % {'service': backup_service}
- self.db.backup_update(context, backup_id, {'status': 'error',
- 'fail_reason': err})
+ self._update_backup_error(backup, context, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
backup_options = backup_service.import_record(backup_url)
except Exception as err:
msg = six.text_type(err)
- self.db.backup_update(context,
- backup_id,
- {'status': 'error',
- 'fail_reason': msg})
+ self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = ['display_name',
if entry not in backup_options:
msg = (_('Backup metadata received from driver for '
'import is missing %s.'), entry)
- self.db.backup_update(context,
- backup_id,
- {'status': 'error',
- 'fail_reason': msg})
+ self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
backup_update[entry] = backup_options[entry]
# Update the database
- self.db.backup_update(context, backup_id, backup_update)
+ backup.update(backup_update)
+ backup.save()
# Verify backup
try:
if isinstance(backup_service, driver.BackupDriverWithVerify):
- backup_service.verify(backup_id)
+ backup_service.verify(backup.id)
else:
LOG.warning(_LW('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
'not verified. Skipping verify.'),
{'service': self.driver_name,
- 'id': backup_id})
+ 'id': backup.id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
- self.db.backup_update(context, backup_id,
- {'status': 'error',
- 'fail_reason':
- six.text_type(err)})
+ self._update_backup_error(backup, context,
+ six.text_type(err))
LOG.info(_LI('Import record id %s metadata from driver '
- 'finished.'), backup_id)
+ 'finished.'), backup.id)
- def reset_status(self, context, backup_id, status):
+ def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
- :param backup_id: The backup id for reset status operation
+ :param backup: The backup object for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver
"""
LOG.info(_LI('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
- {'backup_id': backup_id,
+ {'backup_id': backup.id,
'status': status})
try:
# NOTE(flaper87): Verify the driver is enabled
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Backup driver has not been initialized"))
- backup = self.db.backup_get(context, backup_id)
- backup_service = self._map_service_to_driver(backup['service'])
+ backup_service = self._map_service_to_driver(backup.service)
LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
# check whether we could verify the backup is ok or not
if isinstance(backup_service,
driver.BackupDriverWithVerify):
- backup_service.verify(backup_id)
- self.db.backup_update(context, backup_id,
- {'status': status})
+ backup_service.verify(backup.id)
+ backup.status = status
+ backup.save()
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
- 'id': backup_id})
+ 'id': backup.id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == 'error' or
(status == 'available' and
- backup['status'] == 'restoring')):
- self.db.backup_update(context, backup_id,
- {'status': status})
+ backup.status == 'restoring')):
+ backup.status = status
+ backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
- "Skipping reset."), backup_id)
+ "Skipping reset."), backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Backup service %(configured_service)s '
'%(id)s is not verified. '
'Skipping verify.'),
{'configured_service': self.driver_name,
- 'id': backup_id})
+ 'id': backup.id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
- 'id': backup_id})
+ 'id': backup.id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# send notification to ceilometer
- notifier_info = {'id': backup_id, 'update': {'status': status}}
+ notifier_info = {'id': backup.id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
from oslo_log import log as logging
import oslo_messaging as messaging
+from cinder.objects import base as objects_base
from cinder import rpc
API version history:
1.0 - Initial version.
+ 1.1 - Changed methods to accept backup objects instead of IDs.
"""
BASE_RPC_API_VERSION = '1.0'
super(BackupAPI, self).__init__()
target = messaging.Target(topic=CONF.backup_topic,
version=self.BASE_RPC_API_VERSION)
- self.client = rpc.get_client(target, '1.0')
-
- def create_backup(self, ctxt, host, backup_id, volume_id):
- LOG.debug("create_backup in rpcapi backup_id %s", backup_id)
- cctxt = self.client.prepare(server=host)
- cctxt.cast(ctxt, 'create_backup', backup_id=backup_id)
-
- def restore_backup(self, ctxt, host, backup_id, volume_id):
- LOG.debug("restore_backup in rpcapi backup_id %s", backup_id)
- cctxt = self.client.prepare(server=host)
- cctxt.cast(ctxt, 'restore_backup', backup_id=backup_id,
+ serializer = objects_base.CinderObjectSerializer()
+ self.client = rpc.get_client(target, '1.1', serializer=serializer)
+
+ def create_backup(self, ctxt, backup):
+ LOG.debug("create_backup in rpcapi backup_id %s", backup.id)
+ cctxt = self.client.prepare(server=backup.host)
+ cctxt.cast(ctxt, 'create_backup', backup=backup)
+
+ def restore_backup(self, ctxt, volume_host, backup, volume_id):
+ LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
+ cctxt = self.client.prepare(server=volume_host)
+ cctxt.cast(ctxt, 'restore_backup', backup=backup,
volume_id=volume_id)
- def delete_backup(self, ctxt, host, backup_id):
- LOG.debug("delete_backup rpcapi backup_id %s", backup_id)
- cctxt = self.client.prepare(server=host)
- cctxt.cast(ctxt, 'delete_backup', backup_id=backup_id)
+ def delete_backup(self, ctxt, backup):
+ LOG.debug("delete_backup rpcapi backup_id %s", backup.id)
+ cctxt = self.client.prepare(server=backup.host)
+ cctxt.cast(ctxt, 'delete_backup', backup=backup)
- def export_record(self, ctxt, host, backup_id):
+ def export_record(self, ctxt, backup):
LOG.debug("export_record in rpcapi backup_id %(id)s "
"on host %(host)s.",
- {'id': backup_id,
- 'host': host})
- cctxt = self.client.prepare(server=host)
- return cctxt.call(ctxt, 'export_record', backup_id=backup_id)
+ {'id': backup.id,
+ 'host': backup.host})
+ cctxt = self.client.prepare(server=backup.host)
+ return cctxt.call(ctxt, 'export_record', backup=backup)
def import_record(self,
ctxt,
host,
- backup_id,
+ backup,
backup_service,
backup_url,
backup_hosts):
LOG.debug("import_record rpcapi backup id %(id)s "
"on host %(host)s for backup_url %(url)s.",
- {'id': backup_id,
+ {'id': backup.id,
'host': host,
'url': backup_url})
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'import_record',
- backup_id=backup_id,
+ backup=backup,
backup_service=backup_service,
backup_url=backup_url,
backup_hosts=backup_hosts)
- def reset_status(self, ctxt, host, backup_id, status):
+ def reset_status(self, ctxt, backup, status):
LOG.debug("reset_status in rpcapi backup_id %(id)s "
"on host %(host)s.",
- {'id': backup_id,
- 'host': host})
- cctxt = self.client.prepare(server=host)
- return cctxt.cast(ctxt, 'reset_status', backup_id=backup_id,
+ {'id': backup.id,
+ 'host': backup.host})
+ cctxt = self.client.prepare(server=backup.host)
+ return cctxt.cast(ctxt, 'reset_status', backup=backup,
status=status)
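With the CinderObjectSerializer attached to the client, objects are
flattened to primitives on cast and rebuilt on the manager side. A
minimal sketch of that round trip, invoking the oslo.messaging
serializer hooks directly for clarity:

    serializer = objects_base.CinderObjectSerializer()
    primitive = serializer.serialize_entity(ctxt, backup)
    restored = serializer.deserialize_entity(ctxt, primitive)
    assert restored.id == backup.id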
# Need to register global_opts
from cinder.common import config # noqa
+from cinder import objects
from cinder.openstack.common.report import guru_meditation_report as gmr
from cinder import service
from cinder import utils
def main():
+ objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder.i18n import _
-from cinder.objects import base as objects_base
+from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
if not rpc.initialized():
rpc.init(CONF)
target = messaging.Target(topic=CONF.volume_topic)
- serializer = objects_base.CinderObjectSerializer()
+ serializer = objects.base.CinderObjectSerializer()
self._client = rpc.get_client(target, serializer=serializer)
return self._client
on which the backup operation is running.
"""
ctxt = context.get_admin_context()
- backups = db.backup_get_all(ctxt)
+ backups = objects.BackupList.get_all(ctxt)
hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
print(hdr % (_('ID'),
def main():
"""Parse options and call the appropriate class/method."""
+ objects.register_all()
CONF.register_cli_opt(category_opt)
script_name = sys.argv[0]
# need to receive it via RPC.
__import__('cinder.objects.volume')
__import__('cinder.objects.snapshot')
+ __import__('cinder.objects.backup')
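register_all() must run before any RPC traffic so every object class,
including the new Backup, is in the registry when a primitive arrives;
deserializing a class that was never registered fails. A minimal
sketch under that assumption:

    from cinder import objects
    objects.register_all()          # imports cinder.objects.backup et al.
    backup = objects.Backup(context=ctxt)  # registry now knows 'Backup'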
--- /dev/null
+# Copyright 2015 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_versionedobjects import fields
+
+from cinder import db
+from cinder import exception
+from cinder import objects
+from cinder.objects import base
+from cinder import utils
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+@base.CinderObjectRegistry.register
+class Backup(base.CinderPersistentObject, base.CinderObject,
+ base.CinderObjectDictCompat):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'id': fields.UUIDField(),
+
+ 'user_id': fields.UUIDField(),
+ 'project_id': fields.UUIDField(),
+
+ 'volume_id': fields.UUIDField(),
+ 'host': fields.StringField(nullable=True),
+ 'availability_zone': fields.StringField(nullable=True),
+ 'container': fields.StringField(nullable=True),
+ 'parent_id': fields.StringField(nullable=True),
+ 'status': fields.StringField(nullable=True),
+ 'fail_reason': fields.StringField(nullable=True),
+ 'size': fields.IntegerField(),
+
+ 'display_name': fields.StringField(nullable=True),
+ 'display_description': fields.StringField(nullable=True),
+
+ # NOTE(dulek): Metadata field is used by backup drivers to store
+ # arbitrary strings, which is why it can't be DictOfStringsField.
+ 'service_metadata': fields.StringField(nullable=True),
+ 'service': fields.StringField(nullable=True),
+
+ 'object_count': fields.IntegerField(),
+ }
+
+ obj_extra_fields = ['name']
+
+ @property
+ def name(self):
+ return CONF.backup_name_template % self.id
+
+ def obj_make_compatible(self, primitive, target_version):
+ """Make an object representation compatible with a target version."""
+ target_version = utils.convert_version_to_tuple(target_version)
+
+ @staticmethod
+ def _from_db_object(context, backup, db_backup):
+ for name, field in backup.fields.items():
+ value = db_backup.get(name)
+ if isinstance(field, fields.IntegerField):
+ value = value if value is not None else 0
+ backup[name] = value
+
+ backup._context = context
+ backup.obj_reset_changes()
+ return backup
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, id):
+ db_backup = db.backup_get(context, id)
+ return cls._from_db_object(context, cls(context), db_backup)
+
+ @base.remotable
+ def create(self):
+ if self.obj_attr_is_set('id'):
+ raise exception.ObjectActionError(action='create',
+ reason='already created')
+ updates = self.obj_get_changes()
+
+ db_backup = db.backup_create(self._context, updates)
+ self._from_db_object(self._context, self, db_backup)
+
+ @base.remotable
+ def save(self):
+ updates = self.obj_get_changes()
+ if updates:
+ db.backup_update(self._context, self.id, updates)
+
+ self.obj_reset_changes()
+
+ @base.remotable
+ def destroy(self):
+ db.backup_destroy(self._context, self.id)
+
+
+@base.CinderObjectRegistry.register
+class BackupList(base.ObjectListBase, base.CinderObject):
+ VERSION = '1.0'
+
+ fields = {
+ 'objects': fields.ListOfObjectsField('Backup'),
+ }
+ child_versions = {
+ '1.0': '1.0'
+ }
+
+ @base.remotable_classmethod
+ def get_all(cls, context, filters=None):
+ backups = db.backup_get_all(context, filters)
+ return base.obj_make_list(context, cls(context), objects.Backup,
+ backups)
+
+ @base.remotable_classmethod
+ def get_all_by_host(cls, context, host):
+ backups = db.backup_get_all_by_host(context, host)
+ return base.obj_make_list(context, cls(context), objects.Backup,
+ backups)
+
+ @base.remotable_classmethod
+ def get_all_by_project(cls, context, project_id, filters=None):
+ backups = db.backup_get_all_by_project(context, project_id, filters)
+ return base.obj_make_list(context, cls(context), objects.Backup,
+ backups)
+
+ @base.remotable_classmethod
+ def get_all_by_volume(cls, context, volume_id, filters=None):
+ backups = db.backup_get_all_by_volume(context, volume_id, filters)
+ return base.obj_make_list(context, cls(context), objects.Backup,
+ backups)
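The save() above only issues an UPDATE when obj_get_changes() reports a
delta, and _from_db_object() ends with obj_reset_changes(), so freshly
loaded objects start clean. A minimal sketch of the resulting lifecycle
(ids assumed):

    backup = objects.Backup.get_by_id(ctxt, backup_id)
    assert not backup.obj_get_changes()       # loaded clean
    backup.status = 'deleting'
    assert backup.obj_get_changes() == {'status': 'deleting'}
    backup.save()                             # UPDATE with just the delta
    backup.destroy()                          # DELETE by id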
def test_backup_reset_status_as_admin(self):
ctx = context.RequestContext('admin', 'fake', True)
- volume = db.volume_create(ctx, {'status': 'available'})
+ volume = db.volume_create(ctx, {'status': 'available',
+ 'user_id': 'user',
+ 'project_id': 'project'})
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
- 'volume_id': volume['id']})
+ 'volume_id': volume['id'],
+ 'user_id': 'user',
+ 'project_id': 'project'})
resp = self._issue_backup_reset(ctx,
backup,
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
- 'volume_id': volume['id']})
+ 'volume_id': volume['id'],
+ 'user_id': 'user',
+ 'project_id': 'project'})
resp = self._issue_backup_reset(ctx,
backup,
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id)
- mock_restore_backup.assert_called_once_with(mock.ANY,
- 'HostB',
- backup_id,
- volume_id)
+ mock_restore_backup.assert_called_once_with(mock.ANY, u'HostB',
+ mock.ANY, volume_id)
+ # Check that restore_backup was called with the correct backup object.
+ self.assertEqual(backup_id, mock_restore_backup.call_args[0][2].id)
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
from cinder import db
from cinder import exception
from cinder.i18n import _
+from cinder import objects
from cinder import test
from cinder import utils
'size': 1,
'container': container,
'volume_id': '1234-5678-1234-8888',
- 'parent_id': parent_id}
+ 'parent_id': parent_id,
+ 'user_id': 'user-id',
+ 'project_id': 'project-id',
+ }
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
self.flags(backup_compression_algorithm='bz2')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
self.flags(backup_compression_algorithm='zlib')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
backup_id=FAKE_BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, FAKE_BACKUP_ID)
+ backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, FAKE_BACKUP_ID)
+ backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
CONF.set_override("backup_enable_progress_timer", False)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
CONF.set_override("backup_object_number_per_notification", 10)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
CONF.set_override("backup_enable_progress_timer", True)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
self._create_backup_db_entry(container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self):
self._create_backup_db_entry(container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Verify sha contents
backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
parent_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files
self._create_backup_db_entry(container=container_name, backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
parent_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup)
self._create_backup_db_entry(container=container_name, backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
parent_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
# Verify that two shas are changed at index 16 and 20
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
self.flags(backup_sha_block_size_bytes=32)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
+
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
self.flags(backup_sha_block_size_bytes=1024)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
self.flags(backup_sha_block_size_bytes = 1024)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
self._create_backup_db_entry(container=container_name, backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file, True)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
with tempfile.NamedTemporaryFile() as restored_file:
- backup = db.backup_get(self.ctxt, 124)
+ backup = objects.Backup.get_by_id(self.ctxt, 124)
service.restore(backup, '1234-5678-1234-8888',
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
def test_delete(self):
self._create_backup_db_entry()
service = nfs.NFSBackupDriver(self.ctxt)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.delete(backup)
def test_get_compressor(self):
--- /dev/null
+# Copyright 2015 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_versionedobjects.tests import test_objects
+
+from cinder import objects
+from cinder.tests.unit import fake_volume
+
+
+fake_backup = {
+ 'id': '1',
+ 'volume_id': 'fake_id',
+ 'status': "creating",
+ 'size': 1,
+ 'display_name': 'fake_name',
+ 'display_description': 'fake_description',
+ 'user_id': 'fake_user',
+ 'project_id': 'fake_project',
+}
+
+
+class TestBackup(test_objects._LocalTest):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ @mock.patch('cinder.db.backup_get', return_value=fake_backup)
+ def test_get_by_id(self, backup_get):
+ backup = objects.Backup.get_by_id(self.context, 1)
+ self._compare(self, fake_backup, backup)
+
+ @mock.patch('cinder.db.backup_create', return_value=fake_backup)
+ def test_create(self, backup_create):
+ backup = objects.Backup(context=self.context)
+ backup.create()
+ self.assertEqual(fake_backup['id'], backup.id)
+ self.assertEqual(fake_backup['volume_id'], backup.volume_id)
+
+ @mock.patch('cinder.db.backup_update')
+ def test_save(self, backup_update):
+ backup = objects.Backup._from_db_object(
+ self.context, objects.Backup(), fake_backup)
+ backup.display_name = 'foobar'
+ backup.save()
+ backup_update.assert_called_once_with(self.context, backup.id,
+ {'display_name': 'foobar'})
+
+ @mock.patch('cinder.db.backup_destroy')
+ def test_destroy(self, backup_destroy):
+ backup = objects.Backup(context=self.context, id=1)
+ backup.destroy()
+ backup_destroy.assert_called_once_with(self.context, '1')
+
+
+class TestBackupList(test_objects._LocalTest):
+ @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
+ def test_get_all(self, backup_get_all):
+ backups = objects.BackupList.get_all(self.context)
+ self.assertEqual(1, len(backups))
+ TestBackup._compare(self, fake_backup, backups[0])
+
+ @mock.patch('cinder.db.backup_get_all_by_project',
+ return_value=[fake_backup])
+ def test_get_all_by_project(self, get_all_by_project):
+ backups = objects.BackupList.get_all_by_project(
+ self.context, self.project_id)
+ self.assertEqual(1, len(backups))
+ TestBackup._compare(self, fake_backup, backups[0])
+
+ @mock.patch('cinder.db.backup_get_all_by_host',
+ return_value=[fake_backup])
+ def test_get_all_by_host(self, get_all_by_host):
+ fake_volume_obj = fake_volume.fake_volume_obj(self.context)
+
+ backups = objects.BackupList.get_all_by_host(self.context,
+ fake_volume_obj.id)
+ self.assertEqual(1, len(backups))
+ TestBackup._compare(self, fake_backup, backups[0])
from cinder import context
from cinder import db
from cinder import exception
+from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service
status='creating',
size=1,
object_count=0,
- project_id='fake'):
+ project_id='fake',
+ service=None):
"""Create a backup entry in the DB.
Return the entry ID
"""
- backup = {}
- backup['volume_id'] = volume_id
- backup['user_id'] = 'fake'
- backup['project_id'] = project_id
- backup['host'] = 'testhost'
- backup['availability_zone'] = '1'
- backup['display_name'] = display_name
- backup['display_description'] = display_description
- backup['container'] = container
- backup['status'] = status
- backup['fail_reason'] = ''
- backup['service'] = CONF.backup_driver
- backup['snapshot'] = False
- backup['parent_id'] = None
- backup['size'] = size
- backup['object_count'] = object_count
- return db.backup_create(self.ctxt, backup)['id']
+ kwargs = {}
+ kwargs['volume_id'] = volume_id
+ kwargs['user_id'] = 'fake'
+ kwargs['project_id'] = project_id
+ kwargs['host'] = 'testhost'
+ kwargs['availability_zone'] = '1'
+ kwargs['display_name'] = display_name
+ kwargs['display_description'] = display_description
+ kwargs['container'] = container
+ kwargs['status'] = status
+ kwargs['fail_reason'] = ''
+ kwargs['service'] = service or CONF.backup_driver
+ kwargs['snapshot'] = False
+ kwargs['parent_id'] = None
+ kwargs['size'] = size
+ kwargs['object_count'] = object_count
+ backup = objects.Backup(context=self.ctxt, **kwargs)
+ backup.create()
+ return backup
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
"""Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
- backup_id = self._create_backup_db_entry(status='available',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
- export = self.backup_mgr.export_record(self.ctxt, backup_id)
+ export = self.backup_mgr.export_record(self.ctxt, backup)
return export
def _create_export_record_db_entry(self,
Return the entry ID
"""
- backup = {}
- backup['volume_id'] = volume_id
- backup['user_id'] = 'fake'
- backup['project_id'] = project_id
- backup['status'] = status
- return db.backup_create(self.ctxt, backup)['id']
+ kwargs = {}
+ kwargs['volume_id'] = volume_id
+ kwargs['user_id'] = 'fake'
+ kwargs['project_id'] = project_id
+ kwargs['status'] = status
+ backup = objects.Backup(context=self.ctxt, **kwargs)
+ backup.create()
+ return backup
class BackupTestCase(BaseBackupTest):
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
- backup1_id = self._create_backup_db_entry(status='creating')
- backup2_id = self._create_backup_db_entry(status='restoring')
- backup3_id = self._create_backup_db_entry(status='deleting')
+ backup1 = self._create_backup_db_entry(status='creating')
+ backup2 = self._create_backup_db_entry(status='restoring')
+ backup3 = self._create_backup_db_entry(status='deleting')
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual(vol2['status'], 'error_restoring')
- backup1 = db.backup_get(self.ctxt, backup1_id)
+ backup1 = db.backup_get(self.ctxt, backup1.id)
self.assertEqual(backup1['status'], 'error')
- backup2 = db.backup_get(self.ctxt, backup2_id)
+ backup2 = db.backup_get(self.ctxt, backup2.id)
self.assertEqual(backup2['status'], 'available')
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
- backup3_id)
+ backup3.id)
def test_create_backup_with_bad_volume_status(self):
"""Test error handling when creating a backup from a volume
with a bad status
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
- backup_id = self._create_backup_db_entry(volume_id=vol_id)
+ backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
self.ctxt,
- backup_id)
+ backup)
def test_create_backup_with_bad_backup_status(self):
"""Test error handling when creating a backup with a backup
with a bad status
"""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='available',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup,
self.ctxt,
- backup_id)
+ backup)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_error(self, _mock_volume_backup):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(volume_id=vol_id)
+ backup = self._create_backup_db_entry(volume_id=vol_id)
_mock_volume_backup.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
- backup_id)
+ backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
self.assertTrue(_mock_volume_backup.called)
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
- backup_id = self._create_backup_db_entry(volume_id=vol_id)
+ backup = self._create_backup_db_entry(volume_id=vol_id)
- self.backup_mgr.create_backup(self.ctxt, backup_id)
+ self.backup_mgr.create_backup(self.ctxt, backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
self.assertTrue(_mock_volume_backup.called)
"""Test normal backup creation with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
- backup_id = self._create_backup_db_entry(volume_id=vol_id)
+ backup = self._create_backup_db_entry(volume_id=vol_id)
- self.backup_mgr.create_backup(self.ctxt, backup_id)
+ self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_restore_backup_with_bad_volume_status(self):
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
- backup_id = self._create_backup_db_entry(volume_id=vol_id)
+ backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup,
self.ctxt,
- backup_id,
+ backup,
vol_id)
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
def test_restore_backup_with_bad_backup_status(self):
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
- backup_id = self._create_backup_db_entry(status='available',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
- backup_id,
+ backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
- backup_id = self._create_backup_db_entry(status='restoring',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='restoring',
+ volume_id=vol_id)
_mock_volume_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
- backup_id,
+ backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error_restoring')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called)
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
- backup_id = self._create_backup_db_entry(status='restoring',
- volume_id=vol_id)
+ service = 'cinder.tests.unit.backup.bad_service'
+ backup = self._create_backup_db_entry(status='restoring',
+ volume_id=vol_id,
+ service=service)
- service = 'cinder.tests.unit.backup.bad_service'
- db.backup_update(self.ctxt, backup_id, {'service': service})
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
- backup_id,
+ backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
- backup_id = self._create_backup_db_entry(status='restoring',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='restoring',
+ volume_id=vol_id)
- self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called)
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
- backup_id = self._create_backup_db_entry(status='restoring',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='restoring',
+ volume_id=vol_id)
- self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self):
with a bad status.
"""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='available',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
- backup_id)
- backup = db.backup_get(self.ctxt, backup_id)
+ backup)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='deleting',
- display_name='fail_on_delete',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='deleting',
+ display_name='fail_on_delete',
+ volume_id=vol_id)
self.assertRaises(IOError,
self.backup_mgr.delete_backup,
self.ctxt,
- backup_id)
- backup = db.backup_get(self.ctxt, backup_id)
+ backup)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_bad_service(self):
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='deleting',
- volume_id=vol_id)
- service = 'cinder.tests.unit.backup.bad_service'
- db.backup_update(self.ctxt, backup_id, {'service': service})
+ service = 'cinder.tests.unit.backup.bad_service'
+ backup = self._create_backup_db_entry(status='deleting',
+ volume_id=vol_id,
+ service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
- backup_id)
- backup = db.backup_get(self.ctxt, backup_id)
+ backup)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_no_service(self):
with no service defined for that backup, relates to bug #1162908
"""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='deleting',
- volume_id=vol_id)
- db.backup_update(self.ctxt, backup_id, {'service': None})
- self.backup_mgr.delete_backup(self.ctxt, backup_id)
+ backup = self._create_backup_db_entry(status='deleting',
+ volume_id=vol_id)
+ backup.service = None
+ backup.save()
+ self.backup_mgr.delete_backup(self.ctxt, backup)
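The backup.service = None / backup.save() sequence above relies on the usual VersionedObjects change tracking; a save() along these lines would support it (a sketch, assuming the standard obj_get_changes helpers rather than this patch's exact code):

    @base.remotable
    def save(self):
        # Only the fields touched since the last load are written back.
        updates = self.obj_get_changes()
        if updates:
            db.backup_update(self._context, self.id, updates)
        self.obj_reset_changes()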
def test_delete_backup(self):
"""Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='deleting',
- volume_id=vol_id)
- self.backup_mgr.delete_backup(self.ctxt, backup_id)
+ backup = self._create_backup_db_entry(status='deleting',
+ volume_id=vol_id)
+ self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
- backup_id)
+ backup.id)
ctxt_read_deleted = context.get_admin_context('yes')
- backup = db.backup_get(ctxt_read_deleted, backup_id)
+ backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertEqual(backup.deleted, True)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual(backup.status, 'deleted')
def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='deleting',
- volume_id=vol_id)
- self.backup_mgr.delete_backup(self.ctxt, backup_id)
+ backup = self._create_backup_db_entry(status='deleting',
+ volume_id=vol_id)
+ self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
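Passing the object itself into delete_backup matches the rpcapi change in this series; on the wire it presumably reduces to something like the following sketch (the prepare/topic handling here is an assumption):

    def delete_backup(self, ctxt, backup):
        # The RPC layer's object serializer ships the Backup whole, so
        # the manager on backup.host receives a hydrated object.
        cctxt = self.client.prepare(server=backup.host)
        cctxt.cast(ctxt, 'delete_backup', backup=backup)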
def test_list_backup(self):
b2 = self._create_backup_db_entry(project_id='project1')
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(len(backups), 1)
- self.assertEqual(backups[0].id, b2)
+ self.assertEqual(backups[0].id, b2.id)
def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups don't show up in backup_get_all_by_project.
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 0)
- backup_id_keep = self._create_backup_db_entry()
- backup_id = self._create_backup_db_entry()
- db.backup_destroy(self.ctxt, backup_id)
+ backup_keep = self._create_backup_db_entry()
+ backup = self._create_backup_db_entry()
+ db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 1)
- self.assertEqual(backups[0].id, backup_id_keep)
+ self.assertEqual(backups[0].id, backup_keep.id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 0)
- backup_id_keep = self._create_backup_db_entry()
- backup_id = self._create_backup_db_entry()
- db.backup_destroy(self.ctxt, backup_id)
+ backup_keep = self._create_backup_db_entry()
+ backup = self._create_backup_db_entry()
+ db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 1)
- self.assertEqual(backups[0].id, backup_id_keep)
+ self.assertEqual(backups[0].id, backup_keep.id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
record with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
- backup_id = self._create_backup_db_entry(status='available',
- volume_id=vol_id)
- service = 'cinder.tests.unit.backup.bad_service'
- db.backup_update(self.ctxt, backup_id, {'service': service})
+ service = 'cinder.tests.unit.backup.bad_service'
+ backup = self._create_backup_db_entry(status='available',
+ volume_id=vol_id,
+ service=service)
+
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
- backup_id)
+ backup)
def test_export_record_with_bad_backup_status(self):
"""Test error handling when exporting a backup record with a backup
"""
vol_id = self._create_volume_db_entry(status='available',
size=1)
- backup_id = self._create_backup_db_entry(status='error',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='error',
+ volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
- backup_id)
+ backup)
def test_export_record(self):
"""Test normal backup record export."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
- backup_id = self._create_backup_db_entry(status='available',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
- export = self.backup_mgr.export_record(self.ctxt, backup_id)
+ export = self.backup_mgr.export_record(self.ctxt, backup)
self.assertEqual(export['backup_service'], CONF.backup_driver)
self.assertTrue('backup_url' in export)
export['backup_service'],
export['backup_url'],
backup_hosts)
- backup = db.backup_get(self.ctxt, imported_record)
+ backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_import.called)
- backup = db.backup_get(self.ctxt, imported_record)
+ backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'error')
export['backup_service'],
export['backup_url'],
backup_hosts)
- backup = db.backup_get(self.ctxt, imported_record)
+ backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_verify.called)
- backup = db.backup_get(self.ctxt, imported_record)
+ backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_nonrestoring_to_available(
self):
vol_id = self._create_volume_db_entry(status='available',
size=1)
- backup_id = self._create_backup_db_entry(status='error',
- volume_id=vol_id)
+ backup = self._create_backup_db_entry(status='error',
+ volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
- backup_id,
+ backup,
'available')
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_available_invalid_backup(self):
'host': 'test',
'provider_location': '',
'size': 1})
- backup = db.backup_create(self.ctxt,
- {'status': 'error',
- 'service':
- CONF.backup_driver,
- 'volume_id': volume['id']})
+ backup = self._create_backup_db_entry(status='error',
+ volume_id=volume['id'])
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
- backup['id'],
+ backup,
'available')
- backup = db.backup_get(self.ctxt, backup['id'])
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_restoring_to_available(self):
'host': 'test',
'provider_location': '',
'size': 1})
- backup = db.backup_create(self.ctxt,
- {'status': 'restoring',
- 'service':
- CONF.backup_driver,
- 'volume_id': volume['id']})
-
- self.backup_mgr.reset_status(self.ctxt,
- backup['id'],
- 'available')
- backup = db.backup_get(self.ctxt, backup['id'])
+ backup = self._create_backup_db_entry(status='restoring',
+ volume_id=volume['id'])
+
+ self.backup_mgr.reset_status(self.ctxt, backup, 'available')
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_error(self):
'host': 'test',
'provider_location': '',
'size': 1})
- backup = db.backup_create(self.ctxt,
- {'status': 'creating',
- 'service':
- CONF.backup_driver,
- 'volume_id': volume['id']})
- self.backup_mgr.reset_status(self.ctxt,
- backup['id'],
- 'error')
+ backup = self._create_backup_db_entry(status='creating',
+ volume_id=volume['id'])
+ self.backup_mgr.reset_status(self.ctxt, backup, 'error')
- backup = db.backup_get(self.ctxt, backup['id'])
+ backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error')
from cinder import db
from cinder import exception
from cinder.i18n import _
+from cinder import objects
from cinder import test
from cinder.volume.drivers import rbd as rbddriver
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
- def _create_backup_db_entry(self, backupid, volid, size):
- backup = {'id': backupid, 'size': size, 'volume_id': volid}
+ def _create_backup_db_entry(self, backupid, volid, size,
+ userid='user-id', projectid='project-id'):
+ backup = {'id': backupid, 'size': size, 'volume_id': volid,
+ 'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id']
def time_inc(self):
self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id,
self.volume_size)
- self.backup = db.backup_get(self.ctxt, self.backup_id)
+ self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
# Create alternate volume.
self.alt_volume_id = str(uuid.uuid4())
backup_id = str(uuid.uuid4())
self._create_backup_db_entry(backup_id, volume_id, 1)
- backup = db.backup_get(self.ctxt, backup_id)
+ backup = objects.Backup.get_by_id(self.ctxt, backup_id)
self.assertRaises(exception.InvalidParameterValue, self.service.backup,
backup, self.volume_file)
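objects.Backup.get_by_id, used throughout these driver tests in place of db.backup_get, presumably follows the standard fetch-and-wrap pattern; _from_db_object is the conventional helper name and is assumed here, not quoted from the patch:

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_backup = db.backup_get(context, id)
        return cls._from_db_object(context, cls(context), db_backup)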
from cinder import context
from cinder import db
from cinder import exception
+from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
- def _create_backup_db_entry(self, backupid, volid, size):
- backup = {'id': backupid, 'size': size, 'volume_id': volid}
+ def _create_backup_db_entry(self, backupid, volid, size,
+ userid='user-id', projectid='project-id'):
+ backup = {'id': backupid, 'size': size, 'volume_id': volid,
+ 'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
self._create_backup_db_entry(self.backup_id, self.volume_id, 1)
self._create_volume_db_entry(self.volume_id, 1)
- self.backup = db.backup_get(self.ctxt, self.backup_id)
+ self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
self.driver = fake_service.FakeBackupService(self.ctxt)
def test_get_metadata(self):
from cinder import db
from cinder import exception
from cinder.i18n import _
+from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_swift_client
from cinder.tests.unit.backup import fake_swift_client2
'size': 1,
'container': container,
'volume_id': '1234-5678-1234-8888',
- 'parent_id': parent_id}
+ 'parent_id': parent_id,
+ 'user_id': 'user-id',
+ 'project_id': 'project-id',
+ }
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
self.flags(backup_compression_algorithm='bz2')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
self.flags(backup_compression_algorithm='zlib')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
self._create_backup_db_entry(container=None)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], 'volumebackups')
@mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'
CONF.set_override("backup_swift_enable_progress_timer", False)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
CONF.set_override("backup_object_number_per_notification", 10)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
CONF.set_override("backup_swift_enable_progress_timer", True)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
self._create_backup_db_entry(container=container_name)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self):
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Verify sha contents
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup)
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
# Verify that two shas are changed at index 16 and 20
self._create_backup_db_entry(container=container_name)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
service.backup,
backup, self.volume_file)
self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', volume_file)
def test_restore_delta(self):
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123)
self.volume_file.seek(0)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file, True)
- deltabackup = db.backup_get(self.ctxt, 124)
+ deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
with tempfile.NamedTemporaryFile() as restored_file:
- backup = db.backup_get(self.ctxt, 124)
+ backup = objects.Backup.get_by_id(self.ctxt, 124)
service.restore(backup, '1234-5678-1234-8888',
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
service.restore,
backup, '1234-5678-1234-8888', volume_file)
service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.InvalidBackup,
service.restore,
backup, '1234-5678-1234-8888', volume_file)
def test_delete(self):
self._create_backup_db_entry()
service = swift_dr.SwiftBackupDriver(self.ctxt)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
service.delete(backup)
def test_delete_wraps_socket_error(self):
container_name = 'socket_error_on_delete'
self._create_backup_db_entry(container=container_name)
service = swift_dr.SwiftBackupDriver(self.ctxt)
- backup = db.backup_get(self.ctxt, 123)
+ backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
service.delete,
backup)
from cinder import context
from cinder import db
from cinder import exception
+from cinder import objects
from cinder import test
from cinder import utils
'size': 1,
'container': 'test-container',
'volume_id': '1234-5678-1234-8888',
- 'service_metadata': service_metadata}
+ 'service_metadata': service_metadata,
+ 'user_id': 'user-id',
+ 'project_id': 'project-id',
+ }
return db.backup_create(self.ctxt, backup)['id']
def test_backup_image(self):
with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume
- backup1 = db.backup_get(self.ctxt, backup_id1)
+ backup1 = objects.Backup.get_by_id(self.ctxt, backup_id1)
self.driver.backup(backup1, volume_file)
- backup2 = db.backup_get(self.ctxt, backup_id2)
+ backup2 = objects.Backup.get_by_id(self.ctxt, backup_id2)
self.driver.backup(backup2, volume_file)
# Create a backup that fails
- fail_back = db.backup_get(self.ctxt, backup_id3)
+ fail_back = objects.Backup.get_by_id(self.ctxt, backup_id3)
self.sim.error_injection('backup', 'fail')
self.assertRaises(exception.InvalidBackup,
self.driver.backup, fail_back, volume_file)
with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume
- backup1 = db.backup_get(self.ctxt, 123)
+ backup1 = objects.Backup.get_by_id(self.ctxt, 123)
self.driver.backup(backup1, volume_file)
- backup2 = db.backup_get(self.ctxt, 456)
+ backup2 = objects.Backup.get_by_id(self.ctxt, 456)
self.driver.backup(backup2, volume_file)
# Create a backup that fails
self._create_backup_db_entry(666, mode)
- fail_back = db.backup_get(self.ctxt, 666)
+ fail_back = objects.Backup.get_by_id(self.ctxt, 666)
self.sim.error_injection('backup', 'fail')
self.assertRaises(exception.InvalidBackup,
self.driver.backup, fail_back, volume_file)
with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume
- backup1 = db.backup_get(self.ctxt, 123)
+ backup1 = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.InvalidBackup,
self.driver.backup, backup1, volume_file)
'container': 'fake-container',
'status': 'fake-status',
'size': 123,
- 'object_count': 1}
+ 'object_count': 1,
+ 'volume_id': 'fake-volume-id',
+ }
backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s'
backup_cmds.list()
get_admin_context.assert_called_once_with()
- backup_get_all.assert_called_once_with(ctxt)
+ backup_get_all.assert_called_once_with(ctxt, None)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.utils.service_is_up')
"No value passed for parameter 'id' in function call",
"Module 'cinder.objects' has no 'Snapshot' member",
"Module 'cinder.objects' has no 'SnapshotList' member",
+ "Module 'cinder.objects' has no 'Backup' member",
+ "Module 'cinder.objects' has no 'BackupList' member",
]
objects_ignore_modules = ["cinder/objects/"]
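The two new pylint exclusions are needed because Backup and BackupList only appear on cinder.objects at runtime, when the registry hook imports the module; static analysis never sees them. The hook is assumed to follow the existing register_all() convention:

    # cinder/objects/__init__.py (assumed shape of the addition)
    def register_all():
        # Importing the module registers Backup and BackupList on the
        # cinder.objects namespace as a side effect.
        __import__('cinder.objects.backup')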