Add new admin APIs for backup export and import.

The new commands export the backup details (not the actual backup
data) to a string that can be imported again into another OpenStack
cloud, or into the same cloud if the backup database was corrupted.

The change includes a default backup driver implementation, and the
backup test code was converted to use mock.
blueprint cinder-backup-recover-api
DocImpact new admin API calls backup-import and backup-export
Change-Id: I564194929962e75c67630e73d8711ee6587706d4
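
For illustration, a hypothetical client-side sketch of the two new admin
calls. The endpoint paths match the routes added below; the requests
library usage, token, and endpoint values are assumptions for the sketch,
not part of this change:

    import json
    import requests

    ENDPOINT = 'http://cloud-a:8776/v2/PROJECT_ID'        # placeholder
    OTHER_ENDPOINT = 'http://cloud-b:8776/v2/PROJECT_ID'  # placeholder
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN',             # placeholder
               'Content-Type': 'application/json'}

    # Export the record: GET /v2/<project>/backups/<id>/export_record
    url = '%s/backups/%s/export_record' % (ENDPOINT, 'BACKUP_ID')
    record = requests.get(url, headers=HEADERS).json()['backup-record']

    # Import it into another cloud (or back into a rebuilt database):
    # POST /v2/<project>/backups/import_record
    body = {'backup-record': {'backup_service': record['backup_service'],
                              'backup_url': record['backup_url']}}
    url = '%s/backups/import_record' % OTHER_ENDPOINT
    new_backup = requests.post(url, headers=HEADERS,
                               data=json.dumps(body)).json()['backup']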
from cinder.openstack.common import log as logging
from cinder import utils
-
LOG = logging.getLogger(__name__)
elem.set('volume_id')
+def make_backup_export_import_record(elem):
+ elem.set('backup_service')
+ elem.set('backup_url')
+
+
class BackupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('backup', selector='backup')
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
+class BackupExportImportTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('backup-record',
+ selector='backup-record')
+ make_backup_export_import_record(root)
+ alias = Backups.alias
+ namespace = Backups.namespace
+ return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
+
+
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
return restore
+class BackupImportDeserializer(wsgi.MetadataXMLDeserializer):
+ def default(self, string):
+ dom = utils.safe_minidom_parse_string(string)
+ backup = self._extract_backup(dom)
+ retval = {'body': {'backup-record': backup}}
+ return retval
+
+ def _extract_backup(self, node):
+ backup = {}
+ backup_node = self.find_first_child_named(node, 'backup-record')
+
+ attributes = ['backup_service', 'backup_url']
+
+ for attr in attributes:
+ if backup_node.getAttribute(attr):
+ backup[attr] = backup_node.getAttribute(attr)
+ return backup
+
+
class BackupsController(wsgi.Controller):
"""The Backups API controller for the OpenStack API."""
req, dict(new_restore.iteritems()))
return retval
+ @wsgi.response(200)
+ @wsgi.serializers(xml=BackupExportImportTemplate)
+ def export_record(self, req, id):
+ """Export a backup."""
+ LOG.debug(_('export record called for member %s.'), id)
+ context = req.environ['cinder.context']
+
+ try:
+ backup_info = self.backup_api.export_record(context, id)
+ except exception.BackupNotFound as error:
+ raise exc.HTTPNotFound(explanation=error.msg)
+ except exception.InvalidBackup as error:
+ raise exc.HTTPBadRequest(explanation=error.msg)
+
+ retval = self._view_builder.export_summary(
+ req, dict(backup_info.iteritems()))
+ LOG.debug(_('export record output: %s.'), retval)
+ return retval
+
+ @wsgi.response(201)
+ @wsgi.serializers(xml=BackupTemplate)
+ @wsgi.deserializers(xml=BackupImportDeserializer)
+ def import_record(self, req, body):
+ """Import a backup."""
+ LOG.debug(_('Importing record from %s.'), body)
+ if not self.is_valid_body(body, 'backup-record'):
+ msg = _("Incorrect request body format.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ context = req.environ['cinder.context']
+ import_data = body['backup-record']
+ # Verify that body elements are provided
+ try:
+ backup_service = import_data['backup_service']
+ backup_url = import_data['backup_url']
+ except KeyError:
+ msg = _("Incorrect request body format.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ LOG.debug(_('Importing backup using %(service)s and url %(url)s.'),
+ {'service': backup_service, 'url': backup_url})
+
+ try:
+ new_backup = self.backup_api.import_record(context,
+ backup_service,
+ backup_url)
+ except exception.BackupNotFound as error:
+ raise exc.HTTPNotFound(explanation=error.msg)
+ except exception.InvalidBackup as error:
+ raise exc.HTTPBadRequest(explanation=error.msg)
+ except exception.ServiceNotFound as error:
+ raise exc.HTTPInternalServerError(explanation=error.msg)
+
+ retval = self._view_builder.summary(req, dict(new_backup.iteritems()))
+ LOG.debug(_('import record output: %s.'), retval)
+ return retval
+
class Backups(extensions.ExtensionDescriptor):
"""Backups support."""
resources = []
res = extensions.ResourceExtension(
Backups.alias, BackupsController(),
- collection_actions={'detail': 'GET'},
- member_actions={'restore': 'POST'})
+ collection_actions={'detail': 'GET', 'import_record': 'POST'},
+ member_actions={'restore': 'POST', 'export_record': 'GET'})
resources.append(res)
return resources
backups_dict['backups_links'] = backups_links
return backups_dict
+
+ def export_summary(self, request, export):
+ """Generic view of an export."""
+ return {
+ 'backup-record': {
+ 'backup_service': export['backup_service'],
+ 'backup_url': export['backup_url'],
+ },
+ }
return True
return False
+ def _list_backup_services(self):
+ """List all enabled backup services.
+
+ :returns: list -- hosts for services that are enabled for backup.
+ """
+ topic = CONF.backup_topic
+ ctxt = context.get_admin_context()
+ services = self.db.service_get_all_by_topic(ctxt, topic)
+ return [srv['host'] for srv in services if not srv['disabled']]
+
def create(self, context, name, description, volume_id,
container, availability_zone=None):
"""Make the RPC call to create a volume backup."""
'volume_id': volume_id, }
return d
+
+ def export_record(self, context, backup_id):
+ """Make the RPC call to export a volume backup.
+
+ Call backup manager to execute backup export.
+
+ :param context: running context
+ :param backup_id: backup id to export
+ :returns: dictionary -- a description of how to import the backup,
+ containing 'backup_url' and 'backup_service'
+ :raises: InvalidBackup
+ """
+ check_policy(context, 'backup-export')
+ backup = self.get(context, backup_id)
+ if backup['status'] != 'available':
+ msg = (_('Backup status must be available and not %s.') %
+ backup['status'])
+ raise exception.InvalidBackup(reason=msg)
+
+ LOG.debug("Calling RPCAPI with context: "
+ "%(ctx)s, host: %(host)s, backup: %(id)s.",
+ {'ctx': context,
+ 'host': backup['host'],
+ 'id': backup['id']})
+ export_data = self.backup_rpcapi.export_record(context,
+ backup['host'],
+ backup['id'])
+
+ return export_data
+
+ def import_record(self, context, backup_service, backup_url):
+ """Make the RPC call to import a volume backup.
+
+ :param context: running context
+ :param backup_service: backup service name
+ :param backup_url: backup description to be used by the backup driver
+ :returns: the new backup db entry; the import itself completes
+ asynchronously on the backup host
+ :raises: InvalidBackup
+ :raises: ServiceNotFound
+ """
+ check_policy(context, 'backup-import')
+
+ # NOTE(ronenkat): since we don't have a backup-scheduler
+ # we need to find a host that supports the backup service
+ # that was used to create the backup.
+ # We send the request to the first backup service host, and the
+ # backup manager on that host will forward it to the other hosts
+ # on the list if it cannot support the correct service itself.
+ hosts = self._list_backup_services()
+ if len(hosts) == 0:
+ raise exception.ServiceNotFound(service_id=backup_service)
+
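+ # Create a pseudo backup entry in the DB; the backup manager will
+ # fill in the details from the driver's record during import.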
+ options = {'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'volume_id': '0000-0000-0000-0000',
+ 'status': 'creating', }
+ backup = self.db.backup_create(context, options)
+ first_host = hosts.pop()
+ self.backup_rpcapi.import_record(context,
+ first_host,
+ backup['id'],
+ backup_service,
+ backup_url,
+ hosts)
+
+ return backup
def delete(self, backup):
"""Delete a saved backup."""
raise NotImplementedError()
+
+ def export_record(self, backup):
+ """Export backup record.
+
+ Default backup driver implementation.
+ Serialize the backup record describing the backup into a string.
+
+ :param backup: backup entry to export
+ :returns: backup_url - a string describing the backup record
+ """
+ retval = jsonutils.dumps(backup)
+ return retval.encode("base64")
+
+ def import_record(self, backup_url):
+ """Import and verify backup record.
+
+ Default backup driver implementation.
+ De-serialize the backup record into a dictionary, so we can
+ update the database.
+
+ :param backup_url: driver specific backup record string
+ :returns: dictionary object with database updates
+ """
+ return jsonutils.loads(backup_url.decode("base64"))
+
+ def verify(self, backup):
+ """Verify that the backup exists on the backend.
+
+ Verify that the backup is OK, possibly following an import record
+ operation.
+
+ :param backup: backup id of the backup to verify
+ :raises: InvalidBackup, NotImplementedError
+ """
+ raise NotImplementedError()
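
The default implementation above is just base64-encoded JSON. A minimal
standalone sketch of the round-trip, using the stdlib json and base64
modules in place of cinder's jsonutils and Python 2's str.encode('base64'):

    import base64
    import json

    backup = {'id': '1234', 'service': 'cinder.backup.drivers.swift',
              'container': 'volumebackups', 'size': 1}

    # export_record: serialize the DB record that describes the backup
    backup_url = base64.b64encode(json.dumps(backup).encode('utf-8'))

    # import_record: recover the dictionary used to rebuild the DB entry
    assert json.loads(base64.b64decode(backup_url)) == backup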
from oslo.config import cfg
+from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
from cinder import manager
self.az = CONF.storage_availability_zone
self.volume_managers = {}
self._setup_volume_drivers()
+ self.backup_rpcapi = backup_rpcapi.BackupAPI()
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
expected_status = 'restoring-backup'
actual_status = volume['status']
if actual_status != expected_status:
- err = _('Restore backup aborted: expected volume status '
- '%(expected_status)s but got %(actual_status)s.') % {
- 'expected_status': expected_status,
- 'actual_status': actual_status
- }
+ err = (_('Restore backup aborted, expected volume status '
+ '%(expected_status)s but got %(actual_status)s.') %
+ {'expected_status': expected_status,
+ 'actual_status': actual_status})
self.db.backup_update(context, backup_id, {'status': 'available'})
raise exception.InvalidVolume(reason=err)
expected_status = 'restoring'
actual_status = backup['status']
if actual_status != expected_status:
- err = _('Restore backup aborted: expected backup status '
- '%(expected_status)s but got %(actual_status)s.') % {
- 'expected_status': expected_status,
- 'actual_status': actual_status
- }
+ err = (_('Restore backup aborted, expected backup status '
+ '%(expected_status)s but got %(actual_status)s.') %
+ {'expected_status': expected_status,
+ 'actual_status': actual_status})
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
self.db.volume_update(context, volume_id, {'status': 'error'})
context = context.elevated()
self.db.backup_destroy(context, backup_id)
LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
+
+ def export_record(self, context, backup_id):
+ """Export all volume backup metadata details to allow clean import.
+
+ Export backup metadata so it could be re-imported into the database
+ without any prerequisite in the backup database.
+
+ :param context: running context
+ :param backup_id: backup id to export
+ :returns: backup_record -- a description of how to import the backup:
+ 'backup_url' (how to import the backup) and 'backup_service'
+ (the needed driver)
+ :raises: InvalidBackup
+ """
+ LOG.info(_('Export record started, backup: %s.'), backup_id)
+
+ backup = self.db.backup_get(context, backup_id)
+
+ expected_status = 'available'
+ actual_status = backup['status']
+ if actual_status != expected_status:
+ err = (_('Export backup aborted, expected backup status '
+ '%(expected_status)s but got %(actual_status)s.') %
+ {'expected_status': expected_status,
+ 'actual_status': actual_status})
+ raise exception.InvalidBackup(reason=err)
+
+ backup_record = {}
+ backup_record['backup_service'] = backup['service']
+ backup_service = self._map_service_to_driver(backup['service'])
+ configured_service = self.driver_name
+ if backup_service != configured_service:
+ err = (_('Export record aborted, the backup service currently'
+ ' configured [%(configured_service)s] is not the'
+ ' backup service that was used to create this'
+ ' backup [%(backup_service)s].') %
+ {'configured_service': configured_service,
+ 'backup_service': backup_service})
+ raise exception.InvalidBackup(reason=err)
+
+ # Call driver to create backup description string
+ try:
+ utils.require_driver_initialized(self.driver)
+ backup_service = self.service.get_backup_driver(context)
+ backup_url = backup_service.export_record(backup)
+ backup_record['backup_url'] = backup_url
+ except Exception as err:
+ msg = unicode(err)
+ raise exception.InvalidBackup(reason=msg)
+
+ LOG.info(_('Export record finished, backup %s exported.'), backup_id)
+ return backup_record
+
+ def import_record(self,
+ context,
+ backup_id,
+ backup_service,
+ backup_url,
+ backup_hosts):
+ """Import all volume backup metadata details to the backup db.
+
+ :param context: running context
+ :param backup_id: The new backup id for the import
+ :param backup_service: The needed backup driver for import
+ :param backup_url: An identifier string to locate the backup
+ :param backup_hosts: Potential hosts to execute the import
+ :raises: InvalidBackup
+ :raises: ServiceNotFound
+ """
+ LOG.info(_('Import record started, backup_url: %s.'), backup_url)
+
+ # Can we import this backup?
+ if backup_service != self.driver_name:
+ # No, are there additional potential backup hosts in the list?
+ if len(backup_hosts) > 0:
+ # Try the next host on the list; maybe it can handle the import.
+ first_host = backup_hosts.pop()
+ self.backup_rpcapi.import_record(context,
+ first_host,
+ backup_id,
+ backup_service,
+ backup_url,
+ backup_hosts)
+ else:
+ # empty list - we are the last host on the list, fail
+ err = _('Import record failed, cannot find backup '
+ 'service to perform the import. Request service '
+ '%(service)s.') % {'service': backup_service}
+ self.db.backup_update(context, backup_id, {'status': 'error',
+ 'fail_reason': err})
+ raise exception.ServiceNotFound(service_id=backup_service)
+ else:
+ # Yes...
+ try:
+ utils.require_driver_initialized(self.driver)
+ backup_service = self.service.get_backup_driver(context)
+ backup_options = backup_service.import_record(backup_url)
+ except Exception as err:
+ msg = unicode(err)
+ self.db.backup_update(context,
+ backup_id,
+ {'status': 'error',
+ 'fail_reason': msg})
+ raise exception.InvalidBackup(reason=msg)
+
+ required_import_options = ['display_name',
+ 'display_description',
+ 'container',
+ 'size',
+ 'service_metadata',
+ 'service',
+ 'object_count']
+
+ backup_update = {}
+ backup_update['status'] = 'available'
+ backup_update['service'] = self.driver_name
+ backup_update['availability_zone'] = self.az
+ backup_update['host'] = self.host
+ for entry in required_import_options:
+ if entry not in backup_options:
+ msg = (_('Backup metadata received from driver for '
+ 'import is missing %s.') % entry)
+ self.db.backup_update(context,
+ backup_id,
+ {'status': 'error',
+ 'fail_reason': msg})
+ raise exception.InvalidBackup(reason=msg)
+ backup_update[entry] = backup_options[entry]
+ # Update the database
+ self.db.backup_update(context, backup_id, backup_update)
+
+ # Verify backup
+ try:
+ backup_service.verify(backup_id)
+ except NotImplementedError:
+ LOG.warn(_('Backup service %(service)s does not support '
+ 'verify. Backup id %(id)s is not verified. '
+ 'Skipping verify.') % {'service': self.driver_name,
+ 'id': backup_id})
+ except exception.InvalidBackup as err:
+ with excutils.save_and_reraise_exception():
+ self.db.backup_update(context, backup_id,
+ {'status': 'error',
+ 'fail_reason':
+ unicode(err)})
+
+ LOG.info(_('Import record id %s metadata from driver '
+ 'finished.') % backup_id)
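
The forwarding above walks a list of candidate hosts until one runs the
required backup driver. A simplified, self-contained sketch of that
control flow (the Host class and names are illustrative, not cinder's
API):

    class Host(object):
        def __init__(self, name, driver_name):
            self.name = name
            self.driver_name = driver_name

    def import_on(host, required_service, hosts_left):
        """Handle the import locally or forward it down the list."""
        if host.driver_name != required_service:
            if not hosts_left:
                raise LookupError('no host supports %s' % required_service)
            # Forward to the next candidate, passing the shrinking list.
            return import_on(hosts_left.pop(), required_service, hosts_left)
        return host.name  # this host performs the import

    hosts = [Host('h1', 'swift'), Host('h2', 'ceph')]
    first = hosts.pop()  # mirrors the API picking a first host
    assert import_on(first, 'swift', hosts) == 'h1'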
topic=topic)
def delete_backup(self, ctxt, host, backup_id):
LOG.debug("delete_backup rpcapi backup_id %s", backup_id)
topic = rpc.queue_get_for(ctxt, self.topic, host)
self.cast(ctxt,
self.make_msg('delete_backup',
backup_id=backup_id),
topic=topic)
+
+ def export_record(self, ctxt, host, backup_id):
+ LOG.debug("export_record in rpcapi backup_id %(id)s "
+ "on host %(host)s.",
+ {'id': backup_id,
+ 'host': host})
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ LOG.debug("export queue topic=%s" % topic)
+ return self.call(ctxt,
+ self.make_msg('export_record',
+ backup_id=backup_id),
+ topic=topic)
+
+ def import_record(self,
+ ctxt,
+ host,
+ backup_id,
+ backup_service,
+ backup_url,
+ backup_hosts):
+ LOG.debug("import_record rpcapi backup id $(id)s "
+ "on host %(host)s "
+ "for backup_url %(url)s." % {'id': backup_id,
+ 'host': host,
+ 'url': backup_url})
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ self.cast(ctxt,
+ self.make_msg('import_record',
+ backup_id=backup_id,
+ backup_service=backup_service,
+ backup_url=backup_url,
+ backup_hosts=backup_hosts),
+ topic=topic)
"""
import json
from xml.dom import minidom
+import mock
import webob
return db.backup_get(context.get_admin_context(),
backup_id)[attrib_name]
- @staticmethod
- def _stub_service_get_all_by_topic(context, topic):
- return [{'availability_zone': "fake_az", 'host': 'test_host',
- 'disabled': 0, 'updated_at': timeutils.utcnow()}]
-
def test_show_backup(self):
volume_id = utils.create_volume(self.context, size=5,
status='creating')['id']
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
- def test_create_backup_json(self):
- self.stubs.Set(cinder.db, 'service_get_all_by_topic',
- self._stub_service_get_all_by_topic)
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_create_backup_json(self, _mock_service_get_all_by_topic):
+ _mock_service_get_all_by_topic.return_value = [
+ {'availability_zone': "fake_az", 'host': 'test_host',
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5)['id']
self.assertEqual(res.status_int, 202)
self.assertIn('id', res_dict['backup'])
+ self.assertTrue(_mock_service_get_all_by_topic.called)
db.volume_destroy(context.get_admin_context(), volume_id)
- def test_create_backup_xml(self):
- self.stubs.Set(cinder.db, 'service_get_all_by_topic',
- self._stub_service_get_all_by_topic)
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_create_backup_xml(self, _mock_service_get_all_by_topic):
+ _mock_service_get_all_by_topic.return_value = [
+ {'availability_zone': "fake_az", 'host': 'test_host',
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
volume_id = utils.create_volume(self.context, size=2)['id']
req = webob.Request.blank('/v2/fake/backups')
dom = minidom.parseString(res.body)
backup = dom.getElementsByTagName('backup')
self.assertTrue(backup.item(0).hasAttribute('id'))
+ self.assertTrue(_mock_service_get_all_by_topic.called)
db.volume_destroy(context.get_admin_context(), volume_id)
'Invalid volume: Volume to be backed up must'
' be available')
- def test_create_backup_WithOUT_enabled_backup_service(self):
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_create_backup_WithOUT_enabled_backup_service(
+ self,
+ _mock_service_get_all_by_topic):
# need an enabled backup service available
- def stub_empty_service_get_all_by_topic(ctxt, topic):
- return []
+ _mock_service_get_all_by_topic.return_value = []
- self.stubs.Set(cinder.db, 'service_get_all_by_topic',
- stub_empty_service_get_all_by_topic)
volume_id = utils.create_volume(self.context, size=2)['id']
req = webob.Request.blank('/v2/fake/backups')
body = {"backup": {"display_name": "nightly001",
volume = self.volume_api.get(context.get_admin_context(), volume_id)
self.assertEqual(volume['status'], 'available')
- def test_is_backup_service_enabled(self):
- def empty_service(ctxt, topic):
- return []
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_is_backup_service_enabled(self, _mock_service_get_all_by_topic):
test_host = 'test_host'
alt_host = 'strange_host'
-
+ empty_service = []
# service host does not match the volume's host
- def host_not_match(context, topic):
- return [{'availability_zone': "fake_az", 'host': alt_host,
- 'disabled': 0, 'updated_at': timeutils.utcnow()}]
-
+ host_not_match = [{'availability_zone': "fake_az", 'host': alt_host,
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
# service az does not match the volume's az
- def az_not_match(context, topic):
- return [{'availability_zone': "strange_az", 'host': test_host,
- 'disabled': 0, 'updated_at': timeutils.utcnow()}]
-
+ az_not_match = [{'availability_zone': "strange_az", 'host': test_host,
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
# service disabled
- def disabled_service(context, topic):
- return [{'availability_zone': "fake_az", 'host': test_host,
- 'disabled': 1, 'updated_at': timeutils.utcnow()}]
+ disabled_service = [{'availability_zone': "fake_az",
+ 'host': test_host,
+ 'disabled': 1,
+ 'updated_at': timeutils.utcnow()}]
# dead service that last reported in the 20th century
- def dead_service(context, topic):
- return [{'availability_zone': "fake_az", 'host': alt_host,
- 'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}]
+ dead_service = [{'availability_zone': "fake_az", 'host': alt_host,
+ 'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}]
# first service's host does not match but the second one works.
- def multi_services(context, topic):
- return [{'availability_zone': "fake_az", 'host': alt_host,
- 'disabled': 0, 'updated_at': timeutils.utcnow()},
- {'availability_zone': "fake_az", 'host': test_host,
- 'disabled': 0, 'updated_at': timeutils.utcnow()}]
+ multi_services = [{'availability_zone': "fake_az", 'host': alt_host,
+ 'disabled': 0, 'updated_at': timeutils.utcnow()},
+ {'availability_zone': "fake_az", 'host': test_host,
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+ # Set up the mock to run through the following service cases
+ _mock_service_get_all_by_topic.side_effect = [empty_service,
+ host_not_match,
+ az_not_match,
+ disabled_service,
+ dead_service,
+ multi_services]
volume_id = utils.create_volume(self.context, size=2,
host=test_host)['id']
volume = self.volume_api.get(context.get_admin_context(), volume_id)
# test empty service
- self.stubs.Set(cinder.db, 'service_get_all_by_topic', empty_service)
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test service whose host does not match
- self.stubs.Set(cinder.db, 'service_get_all_by_topic', host_not_match)
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test service whose az does not match
- self.stubs.Set(cinder.db, 'service_get_all_by_topic', az_not_match)
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test disabled service
- self.stubs.Set(cinder.db, 'service_get_all_by_topic', disabled_service)
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test dead service
- self.stubs.Set(cinder.db, 'service_get_all_by_topic', dead_service)
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test multiple services where the last one matches
- self.stubs.Set(cinder.db, 'service_get_all_by_topic', multi_services)
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
True)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format')
- def test_restore_backup_volume_id_unspecified(self):
+ @mock.patch('cinder.volume.API.create')
+ def test_restore_backup_volume_id_unspecified(self,
+ _mock_volume_api_create):
# intercept volume creation to ensure created volume
# has status of available
- def fake_volume_api_create(cls, context, size, name, description):
+ def fake_volume_api_create(context, size, name, description):
volume_id = utils.create_volume(self.context, size=size)['id']
return db.volume_get(context, volume_id)
- self.stubs.Set(cinder.volume.API, 'create',
- fake_volume_api_create)
+ _mock_volume_api_create.side_effect = fake_volume_api_create
backup_id = self._create_backup(size=5, status='available')
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
- def test_restore_backup_with_InvalidInput(self):
+ @mock.patch('cinder.backup.API.restore')
+ def test_restore_backup_with_InvalidInput(self,
+ _mock_backup_restore):
- def fake_backup_api_restore_throwing_InvalidInput(cls, context,
- backup_id,
- volume_id):
- msg = _("Invalid input")
- raise exception.InvalidInput(reason=msg)
-
- self.stubs.Set(cinder.backup.API, 'restore',
- fake_backup_api_restore_throwing_InvalidInput)
+ msg = _("Invalid input")
+ _mock_backup_restore.side_effect = \
+ exception.InvalidInput(reason=msg)
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
db.backup_destroy(context.get_admin_context(), backup_id)
- def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(self):
-
- def fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota(
- cls, context, backup_id, volume_id):
- raise exception.VolumeSizeExceedsAvailableQuota(requested='2',
- consumed='2',
- quota='3')
+ @mock.patch('cinder.backup.API.restore')
+ def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(
+ self,
+ _mock_backup_restore):
- self.stubs.Set(
- cinder.backup.API,
- 'restore',
- fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota)
+ _mock_backup_restore.side_effect = \
+ exception.VolumeSizeExceedsAvailableQuota(requested='2',
+ consumed='2',
+ quota='3')
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
'Gigabytes quota. Requested 2G, quota is 3G and '
'2G has been consumed.')
- def test_restore_backup_with_VolumeLimitExceeded(self):
+ @mock.patch('cinder.backup.API.restore')
+ def test_restore_backup_with_VolumeLimitExceeded(self,
+ _mock_backup_restore):
- def fake_backup_api_restore_throwing_VolumeLimitExceeded(cls,
- context,
- backup_id,
- volume_id):
- raise exception.VolumeLimitExceeded(allowed=1)
-
- self.stubs.Set(cinder.backup.API, 'restore',
- fake_backup_api_restore_throwing_VolumeLimitExceeded)
+ _mock_backup_restore.side_effect = \
+ exception.VolumeLimitExceeded(allowed=1)
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
+
+ def test_export_record_as_non_admin(self):
+ backup_id = self._create_backup(status='available', size=10)
+ req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
+ backup_id)
+ req.method = 'GET'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app())
+ # request is not authorized
+ self.assertEqual(res.status_int, 403)
+
+ @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
+ def test_export_backup_record_id_specified_json(self,
+ _mock_export_record_rpc):
+ backup_id = self._create_backup(status='available', size=10)
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+ _mock_export_record_rpc.return_value = \
+ {'backup_service': backup_service,
+ 'backup_url': backup_url}
+ req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
+ backup_id)
+ req.method = 'GET'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ # verify that request is successful
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['backup-record']['backup_service'],
+ backup_service)
+ self.assertEqual(res_dict['backup-record']['backup_url'],
+ backup_url)
+ db.backup_destroy(context.get_admin_context(), backup_id)
+
+ @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
+ def test_export_record_backup_id_specified_xml(self,
+ _mock_export_record_rpc):
+ backup_id = self._create_backup(status='available', size=10)
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+ _mock_export_record_rpc.return_value = \
+ {'backup_service': backup_service,
+ 'backup_url': backup_url}
+ req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
+ backup_id)
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/xml'
+ req.headers['Accept'] = 'application/xml'
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ self.assertEqual(res.status_int, 200)
+ dom = minidom.parseString(res.body)
+ export = dom.getElementsByTagName('backup-record')
+ self.assertEqual(export.item(0).getAttribute('backup_service'),
+ backup_service)
+ self.assertEqual(export.item(0).getAttribute('backup_url'),
+ backup_url)
+
+ db.backup_destroy(context.get_admin_context(), backup_id)
+
+ def test_export_record_with_bad_backup_id(self):
+
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_id = 'bad_id'
+ req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
+ backup_id)
+ req.method = 'GET'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 404)
+ self.assertEqual(res_dict['itemNotFound']['code'], 404)
+ self.assertEqual(res_dict['itemNotFound']['message'],
+ 'Backup %s could not be found.' % backup_id)
+
+ def test_export_record_for_unavailable_backup(self):
+
+ backup_id = self._create_backup(status='restoring')
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
+ backup_id)
+ req.method = 'GET'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['code'], 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ 'Invalid backup: Backup status must be available '
+ 'and not restoring.')
+ db.backup_destroy(context.get_admin_context(), backup_id)
+
+ @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
+ def test_export_record_with_unavailable_service(self,
+ _mock_export_record_rpc):
+ msg = 'fake unavailable service'
+ _mock_export_record_rpc.side_effect = \
+ exception.InvalidBackup(reason=msg)
+ backup_id = self._create_backup(status='available')
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
+ backup_id)
+ req.method = 'GET'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['code'], 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ 'Invalid backup: %s' % msg)
+ db.backup_destroy(context.get_admin_context(), backup_id)
+
+ def test_import_record_as_non_admin(self):
+ backup_service = 'fake'
+ backup_url = 'fake'
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {'backup_service': backup_service,
+ 'backup_url': backup_url}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app())
+ # request is not authorized
+ self.assertEqual(res.status_int, 403)
+
+ @mock.patch('cinder.backup.api.API._list_backup_services')
+ @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
+ def test_import_record_volume_id_specified_json(self,
+ _mock_import_record_rpc,
+ _mock_list_services):
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+ _mock_import_record_rpc.return_value = \
+ {'display_name': 'fake',
+ 'display_description': 'fake',
+ 'container': 'fake',
+ 'size': 1,
+ 'service_metadata': 'fake',
+ 'service': 'fake',
+ 'object_count': 1,
+ 'status': 'available',
+ 'availability_zone': 'fake'}
+ _mock_list_services.return_value = ['fake']
+
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {'backup_service': backup_service,
+ 'backup_url': backup_url}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ # verify that request is successful
+ self.assertEqual(res.status_int, 201)
+ self.assertIn('id', res_dict['backup'])
+
+ @mock.patch('cinder.backup.api.API._list_backup_services')
+ @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
+ def test_import_record_volume_id_specified_xml(self,
+ _mock_import_record_rpc,
+ _mock_list_services):
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+ _mock_import_record_rpc.return_value = \
+ {'display_name': 'fake',
+ 'display_description': 'fake',
+ 'container': 'fake',
+ 'size': 1,
+ 'service_metadata': 'fake',
+ 'service': 'fake',
+ 'object_count': 1,
+ 'status': 'available',
+ 'availability_zone': 'fake'}
+ _mock_list_services.return_value = ['fake']
+
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ req.body = ('<backup-record backup_service="%(backup_service)s" '
+ 'backup_url="%(backup_url)s"/>'
+ % {'backup_url': backup_url,
+ 'backup_service': backup_service})
+
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/xml'
+ req.headers['Accept'] = 'application/xml'
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+
+ self.assertEqual(res.status_int, 201)
+ dom = minidom.parseString(res.body)
+ backup = dom.getElementsByTagName('backup')
+ self.assertTrue(backup.item(0).hasAttribute('id'))
+
+ @mock.patch('cinder.backup.api.API._list_backup_services')
+ def test_import_record_with_no_backup_services(self,
+ _mock_list_services):
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+ _mock_list_services.return_value = []
+
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {'backup_service': backup_service,
+ 'backup_url': backup_url}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 500)
+ self.assertEqual(res_dict['computeFault']['code'], 500)
+ self.assertEqual(res_dict['computeFault']['message'],
+ 'Service %s could not be found.'
+ % backup_service)
+
+ @mock.patch('cinder.backup.api.API._list_backup_services')
+ @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
+ def test_import_backup_with_missing_backup_services(self,
+ _mock_import_record,
+ _mock_list_services):
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+ _mock_list_services.return_value = ['no-match1', 'no-match2']
+ _mock_import_record.side_effect = \
+ exception.ServiceNotFound(service_id='fake')
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {'backup_service': backup_service,
+ 'backup_url': backup_url}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 500)
+ self.assertEqual(res_dict['computeFault']['code'], 500)
+ self.assertEqual(res_dict['computeFault']['message'],
+ 'Service %s could not be found.'
+ % backup_service)
+
+ def test_import_record_with_missing_body_elements(self):
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+ backup_service = 'fake'
+ backup_url = 'fake'
+
+ # test with no backup_service
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {'backup_url': backup_url}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['code'], 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ 'Incorrect request body format.')
+
+ # test with no backup_url
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {'backup_service': backup_service}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['code'], 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ 'Incorrect request body format.')
+
+ # test with no backup_service and no backup_url
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ body = {'backup-record': {}}
+ req.body = json.dumps(body)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['code'], 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ 'Incorrect request body format.')
+
+ def test_import_record_with_no_body(self):
+ ctx = context.RequestContext('admin', 'fake', is_admin=True)
+
+ req = webob.Request.blank('/v2/fake/backups/import_record')
+ req.body = json.dumps(None)
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+ res_dict = json.loads(res.body)
+ # verify that the request fails
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['code'], 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ 'Incorrect request body format.')
"backup:delete": [],
"backup:get": [],
"backup:get_all": [],
- "backup:restore": []
+ "backup:restore": [],
+ "backup:backup-import": [["rule:admin_api"]],
+ "backup:backup-export": [["rule:admin_api"]]
}
"""
import tempfile
+import mock
from oslo.config import cfg
display_description='this is a test backup',
container='volumebackups',
status='creating',
- size=0,
+ size=1,
object_count=0,
project_id='fake'):
"""Create a backup entry in the DB.
vol['attach_status'] = 'detached'
return db.volume_create(self.ctxt, vol)['id']
+ def _create_exported_record_entry(self, vol_size=1):
+ """Create backup metadata export entry."""
+ vol_id = self._create_volume_db_entry(status='available',
+ size=vol_size)
+ backup_id = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
+
+ export = self.backup_mgr.export_record(self.ctxt, backup_id)
+ return export
+
+ def _create_export_record_db_entry(self,
+ volume_id='0000',
+ status='creating',
+ project_id='fake'):
+ """Create a backup entry in the DB.
+
+ Return the entry ID
+ """
+ backup = {}
+ backup['volume_id'] = volume_id
+ backup['user_id'] = 'fake'
+ backup['project_id'] = project_id
+ backup['status'] = status
+ return db.backup_create(self.ctxt, backup)['id']
+
def test_init_host(self):
"""Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
self.ctxt,
backup_id)
- def test_create_backup_with_error(self):
+ @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
+ def test_create_backup_with_error(self, _mock_volume_backup):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
- def fake_backup_volume(context, backup, backup_service):
- raise FakeBackupException('fake')
-
- self.stubs.Set(self.backup_mgr.driver, 'backup_volume',
- fake_backup_volume)
-
+ _mock_volume_backup.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
+ self.assertTrue(_mock_volume_backup.called)
- def test_create_backup(self):
+ @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
+ def test_create_backup(self, _mock_volume_backup):
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
- def fake_backup_volume(context, backup, backup_service):
- pass
-
- self.stubs.Set(self.backup_mgr.driver, 'backup_volume',
- fake_backup_volume)
-
self.backup_mgr.create_backup(self.ctxt, backup_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
+ self.assertTrue(_mock_volume_backup.called)
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling when restoring a backup to a volume
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
- def test_restore_backup_with_driver_error(self):
+ @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
+ def test_restore_backup_with_driver_error(self, _mock_volume_restore):
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
- def fake_restore_backup(context, backup, volume, backup_service):
- raise FakeBackupException('fake')
-
- self.stubs.Set(self.backup_mgr.driver, 'restore_backup',
- fake_restore_backup)
-
+ _mock_volume_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
self.assertEqual(vol['status'], 'error_restoring')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
+ self.assertTrue(_mock_volume_restore.called)
def test_restore_backup_with_bad_service(self):
"""Test error handling when attempting a restore of a backup
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
- def fake_restore_backup(context, backup, volume, backup_service):
- pass
-
- self.stubs.Set(self.backup_mgr.driver, 'restore_backup',
- fake_restore_backup)
-
service = 'cinder.tests.backup.bad_service'
db.backup_update(self.ctxt, backup_id, {'service': service})
self.assertRaises(exception.InvalidBackup,
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
- def test_restore_backup(self):
+ @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
+ def test_restore_backup(self, _mock_volume_restore):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
- def fake_restore_backup(context, backup, volume, backup_service):
- pass
-
- self.stubs.Set(self.backup_mgr.driver, 'restore_backup',
- fake_restore_backup)
-
self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
+ self.assertTrue(_mock_volume_restore.called)
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling when deleting a backup with a backup
self.assertEqual('cinder.backup.drivers.swift',
backup_mgr.driver_name)
setattr(cfg.CONF, 'backup_driver', old_setting)
+
+ def test_export_record_with_bad_service(self):
+ """Test error handling when attempting an export of a backup
+ record with a different service to that used to create the backup.
+ """
+ vol_id = self._create_volume_db_entry(size=1)
+ backup_id = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
+ service = 'cinder.tests.backup.bad_service'
+ db.backup_update(self.ctxt, backup_id, {'service': service})
+ self.assertRaises(exception.InvalidBackup,
+ self.backup_mgr.export_record,
+ self.ctxt,
+ backup_id)
+
+ def test_export_record_with_bad_backup_status(self):
+ """Test error handling when exporting a backup record with a backup
+ with a bad status.
+ """
+ vol_id = self._create_volume_db_entry(status='available',
+ size=1)
+ backup_id = self._create_backup_db_entry(status='error',
+ volume_id=vol_id)
+ self.assertRaises(exception.InvalidBackup,
+ self.backup_mgr.export_record,
+ self.ctxt,
+ backup_id)
+
+ def test_export_record(self):
+ """Test normal backup record export."""
+ vol_size = 1
+ vol_id = self._create_volume_db_entry(status='available',
+ size=vol_size)
+ backup_id = self._create_backup_db_entry(status='available',
+ volume_id=vol_id)
+
+ export = self.backup_mgr.export_record(self.ctxt, backup_id)
+ self.assertEqual(export['backup_service'], CONF.backup_driver)
+ self.assertIn('backup_url', export)
+
+ def test_import_record_with_verify_not_implemented(self):
+ """Test normal backup record import.
+
+ Test the case when import succeeds for the case that the
+ driver does not support verify.
+ """
+ vol_size = 1
+ export = self._create_exported_record_entry(vol_size=vol_size)
+ imported_record = self._create_export_record_db_entry()
+ backup_hosts = []
+ backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
+ _mock_backup_verify_class = ('%s.%s.%s' %
+ (backup_driver.__module__,
+ backup_driver.__class__.__name__,
+ 'verify'))
+ with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
+ _mock_record_verify.side_effect = NotImplementedError()
+ self.backup_mgr.import_record(self.ctxt,
+ imported_record,
+ export['backup_service'],
+ export['backup_url'],
+ backup_hosts)
+ backup = db.backup_get(self.ctxt, imported_record)
+ self.assertEqual(backup['status'], 'available')
+ self.assertEqual(backup['size'], vol_size)
+
+ def test_import_record_with_verify(self):
+ """Test normal backup record import.
+
+ Test the case when import succeeds for the case that the
+ driver implements verify.
+ """
+ vol_size = 1
+ export = self._create_exported_record_entry(vol_size=vol_size)
+ imported_record = self._create_export_record_db_entry()
+ backup_hosts = []
+ backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
+ _mock_backup_verify_class = ('%s.%s.%s' %
+ (backup_driver.__module__,
+ backup_driver.__class__.__name__,
+ 'verify'))
+ with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
+ self.backup_mgr.import_record(self.ctxt,
+ imported_record,
+ export['backup_service'],
+ export['backup_url'],
+ backup_hosts)
+ backup = db.backup_get(self.ctxt, imported_record)
+ self.assertEqual(backup['status'], 'available')
+ self.assertEqual(backup['size'], vol_size)
+
+ def test_import_record_with_bad_service(self):
+ """Test error handling when attempting an import of a backup
+ record with a different service to that used to create the backup.
+ """
+ export = self._create_exported_record_entry()
+ export['backup_service'] = 'cinder.tests.backup.bad_service'
+ imported_record = self._create_export_record_db_entry()
+
+ # Test the case where the additional hosts list is empty
+ backup_hosts = []
+ self.assertRaises(exception.ServiceNotFound,
+ self.backup_mgr.import_record,
+ self.ctxt,
+ imported_record,
+ export['backup_service'],
+ export['backup_url'],
+ backup_hosts)
+
+ # Test that the import backup keeps calling other hosts to find a
+ # suitable host for the backup service
+ backup_hosts = ['fake1', 'fake2']
+ BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
+ with mock.patch(BackupAPI_import) as _mock_backup_import:
+ self.backup_mgr.import_record(self.ctxt,
+ imported_record,
+ export['backup_service'],
+ export['backup_url'],
+ backup_hosts)
+ self.assertTrue(_mock_backup_import.called)
+
+ def test_import_record_with_invalid_backup(self):
+ """Test error handling when attempting an import of a backup
+ record where the backup driver returns an exception.
+ """
+ export = self._create_exported_record_entry()
+ backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
+ _mock_record_import_class = ('%s.%s.%s' %
+ (backup_driver.__module__,
+ backup_driver.__class__.__name__,
+ 'import_record'))
+ imported_record = self._create_export_record_db_entry()
+ backup_hosts = []
+ with mock.patch(_mock_record_import_class) as _mock_record_import:
+ _mock_record_import.side_effect = FakeBackupException('fake')
+ self.assertRaises(exception.InvalidBackup,
+ self.backup_mgr.import_record,
+ self.ctxt,
+ imported_record,
+ export['backup_service'],
+ export['backup_url'],
+ backup_hosts)
+ self.assertTrue(_mock_record_import.called)
+ backup = db.backup_get(self.ctxt, imported_record)
+ self.assertEqual(backup['status'], 'error')
+
+ def test_import_record_with_verify_invalid_backup(self):
+ """Test error handling when attempting an import of a backup
+ record where the backup driver's verify raises an exception.
+ """
+ vol_size = 1
+ export = self._create_exported_record_entry(vol_size=vol_size)
+ imported_record = self._create_export_record_db_entry()
+ backup_hosts = []
+ backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
+ _mock_backup_verify_class = ('%s.%s.%s' %
+ (backup_driver.__module__,
+ backup_driver.__class__.__name__,
+ 'verify'))
+ with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
+ _mock_record_verify.side_effect = \
+ exception.InvalidBackup(reason='fake')
+
+ self.assertRaises(exception.InvalidBackup,
+ self.backup_mgr.import_record,
+ self.ctxt,
+ imported_record,
+ export['backup_service'],
+ export['backup_url'],
+ backup_hosts)
+ self.assertTrue(_mock_record_verify.called)
+ backup = db.backup_get(self.ctxt, imported_record)
+ self.assertEqual(backup['status'], 'error')
from cinder import test
+_backup_db_fields = ['id', 'user_id', 'project_id',
+ 'volume_id', 'host', 'availability_zone',
+ 'display_name', 'display_description',
+ 'container', 'status', 'fail_reason',
+ 'service_metadata', 'service', 'size',
+ 'object_count']
+
+
class BackupBaseDriverTestCase(test.TestCase):
def _create_volume_db_entry(self, id, size):
json_metadata = self.driver.get_metadata(self.volume_id)
self.driver.put_metadata(self.volume_id, json_metadata)
+ def test_export_record(self):
+ export_string = self.driver.export_record(self.backup)
+ export_dict = jsonutils.loads(export_string.decode("base64"))
+ # Make sure we don't lose data when converting to string
+ for key in _backup_db_fields:
+ self.assertIn(key, export_dict)
+ self.assertEqual(self.backup[key], export_dict[key])
+
+ def test_import_record(self):
+ export_string = self.driver.export_record(self.backup)
+ imported_backup = self.driver.import_record(export_string)
+ # Make sure we don't lose data when converting from string
+ for key in _backup_db_fields:
+ self.assertIn(key, imported_backup)
+ self.assertEqual(imported_backup[key], self.backup[key])
+
+ def test_verify(self):
+ self.assertRaises(NotImplementedError,
+ self.driver.verify, self.backup)
+
def tearDown(self):
super(BackupBaseDriverTestCase, self).tearDown()
"backup:get": [],
"backup:get_all": [],
"backup:restore": [],
+ "backup:backup-import": [["rule:admin_api"]],
+ "backup:backup-export": [["rule:admin_api"]],
"snapshot_extension:snapshot_actions:update_snapshot_status": []
}