from cinder.api import extensions
from cinder.api.openstack import wsgi
+from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
# singular name of the resource
self.resource_name = self.collection.rstrip('s')
self.volume_api = volume.API()
+ self.backup_api = backup.API()
def _update(self, *args, **kwargs):
raise NotImplementedError()
return self.volume_api.delete_snapshot(*args, **kwargs)
+class BackupAdminController(AdminController):
+ """AdminController for Backups."""
+
+ collection = 'backups'
+
+ valid_status = set(['available',
+ 'error'
+ ])
+
+ @wsgi.action('os-reset_status')
+ def _reset_status(self, req, id, body):
+ """Reset status on the resource."""
+ context = req.environ['cinder.context']
+ self.authorize(context, 'reset_status')
+ update = self.validate_update(body['os-reset_status'])
+ msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
+ LOG.debug(msg, {'resource': self.resource_name, 'id': id,
+ 'update': update})
+
+ notifier_info = {'id': id, 'update': update}
+ notifier = rpc.get_notifier('backupStatusUpdate')
+ notifier.info(context, self.collection + '.reset_status.start',
+ notifier_info)
+
+ try:
+ self.backup_api.reset_status(context=context, backup_id=id,
+ status=update['status'])
+ except exception.NotFound as e:
+ raise exc.HTTPNotFound(explanation=e.msg)
+ return webob.Response(status_int=202)
+
+
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin actions."""
def get_controller_extensions(self):
exts = []
- for class_ in (VolumeAdminController, SnapshotAdminController):
+ for class_ in (VolumeAdminController, SnapshotAdminController,
+ BackupAdminController):
controller = class_()
extension = extensions.ControllerExtension(
self, class_.collection, controller)
res = extensions.ResourceExtension(
Backups.alias, BackupsController(),
collection_actions={'detail': 'GET', 'import_record': 'POST'},
- member_actions={'restore': 'POST', 'export_record': 'GET'})
+ member_actions={'restore': 'POST', 'export_record': 'GET',
+ 'action': 'POST'})
resources.append(res)
return resources
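For illustration only (not part of this change): with the 'action': 'POST' member route above in place, an admin could trigger the new reset via a request like the sketch below. The endpoint, project id and token are placeholders; the body shape matches what BackupAdminController._reset_status expects, and the response codes mirror the tests further down.

# Hypothetical client-side sketch; endpoint, project id and token are
# placeholders, the body matches the 'os-reset_status' action added above.
import json

import requests  # assumed available; any HTTP client would do

endpoint = 'http://cinder.example.com:8776/v2/PROJECT_ID'  # assumption
backup_id = 'BACKUP_ID'                                    # assumption
token = 'ADMIN_TOKEN'                                      # assumption

resp = requests.post(
    '%s/backups/%s/action' % (endpoint, backup_id),
    headers={'X-Auth-Token': token, 'Content-Type': 'application/json'},
    data=json.dumps({'os-reset_status': {'status': 'error'}}))

# 202 on success, 400 for a status outside valid_status,
# 403 for non-admins, 404 for an unknown backup
assert resp.status_code == 202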
return d
+ def reset_status(self, context, backup_id, status):
+ """Make the RPC call to reset a volume backup's status.
+
+ Call backup manager to execute backup status reset operation.
+ :param context: running context
+ :param backup_id: which backup's status to be reset
+ :parma status: backup's status to be reset
+ :raises: InvalidBackup
+ """
+ # look up the backup so we know which host its manager runs on
+ backup = self.get(context, backup_id)
+ # cast to the backup manager on that host to perform the reset
+ self.backup_rpcapi.reset_status(ctxt=context, host=backup['host'],
+ backup_id=backup_id, status=status)
+
def export_record(self, context, backup_id):
"""Make the RPC call to export a volume backup.
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import quota
+from cinder import rpc
from cinder import utils
from cinder.volume import utils as volume_utils
LOG.info(_('Import record id %s metadata from driver '
'finished.') % backup_id)
+
+ def reset_status(self, context, backup_id, status):
+ """Reset volume backup status.
+
+ :param context: running context
+ :param backup_id: the backup whose status is to be reset
+ :param status: the status to be set
+ :raises: InvalidBackup
+ :raises: BackupVerifyUnsupportedDriver
+ :raises: AttributeError
+ """
+ LOG.info(_('Reset backup status started, backup_id: '
+ '%(backup_id)s, status: %(status)s.'),
+ {'backup_id': backup_id,
+ 'status': status})
+ try:
+ # NOTE(flaper87): Verify the driver is enabled
+ # before going forward. The exception will be caught
+ # and the backup status updated. Fail early since there
+ # is no other status to change but the backup's.
+ utils.require_driver_initialized(self.driver)
+ except exception.DriverNotInitialized:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Backup driver has not been initialized"))
+
+ backup = self.db.backup_get(context, backup_id)
+ backup_service = self._map_service_to_driver(backup['service'])
+ LOG.info(_('Backup service: %s.'), backup_service)
+ if backup_service is not None:
+ configured_service = self.driver_name
+ if backup_service != configured_service:
+ err = _('Reset backup status aborted, the backup service'
+ ' currently configured [%(configured_service)s] '
+ 'is not the backup service that was used to create'
+ ' this backup [%(backup_service)s].') % \
+ {'configured_service': configured_service,
+ 'backup_service': backup_service}
+ raise exception.InvalidBackup(reason=err)
+ # Verify backup
+ try:
+ # only verify when moving to 'available' and the backup is not
+ # coming back from a restore
+ if status == 'available' and backup['status'] != 'restoring':
+ # verification requires a driver that implements verify()
+ if isinstance(backup_service,
+ driver.BackupDriverWithVerify):
+ backup_service.verify(backup_id)
+ self.db.backup_update(context, backup_id,
+ {'status': status})
+ # driver does not support verify function
+ else:
+ msg = (_('Backup service %(configured_service)s '
+ 'does not support verify. Backup id'
+ ' %(id)s is not verified. '
+ 'Skipping verify.') %
+ {'configured_service': self.driver_name,
+ 'id': backup_id})
+ raise exception.BackupVerifyUnsupportedDriver(
+ reason=msg)
+ # reset status to error or from restoring to available
+ else:
+ if (status == 'error' or
+ (status == 'available' and
+ backup['status'] == 'restoring')):
+ self.db.backup_update(context, backup_id,
+ {'status': status})
+ except exception.InvalidBackup:
+ with excutils.save_and_reraise_exception():
+ msg = (_("Backup id %(id)s is not invalid. "
+ "Skipping reset.") % {'id': backup_id})
+ LOG.error(msg)
+ except exception.BackupVerifyUnsupportedDriver:
+ with excutils.save_and_reraise_exception():
+ msg = (_('Backup service %(configured_service)s '
+ 'does not support verify. Backup id'
+ ' %(id)s is not verified. '
+ 'Skipping verify.') %
+ {'configured_service': self.driver_name,
+ 'id': backup_id})
+ LOG.error(msg)
+ except AttributeError:
+ msg = (_('Backup service %(service)s does not support '
+ 'verify. Backup id %(id)s is not verified. '
+ 'Skipping reset.') %
+ {'service': self.driver_name,
+ 'id': backup_id})
+ LOG.error(msg)
+ raise exception.BackupVerifyUnsupportedDriver(
+ reason=msg)
+
+ # send notification to ceilometer
+ notifier_info = {'id': backup_id, 'update': {'status': status}}
+ notifier = rpc.get_notifier('backupStatusUpdate')
+ notifier.info(context, "backups" + '.reset_status.end',
+ notifier_info)
\ No newline at end of file
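As a reading aid (not part of the patch): the transitions the manager's reset_status above permits can be summarized roughly as follows; the helper name and signature are made up for illustration.

# Rough summary of what reset_status above allows; illustrative only.
def _reset_allowed(current_status, requested_status, driver_can_verify):
    """Return True when the manager would apply the requested status."""
    if requested_status == 'error':
        # any backup can be forced to 'error'
        return True
    if requested_status == 'available':
        if current_status == 'restoring':
            # a stuck restore can be put back to 'available' directly
            return True
        # otherwise the driver must support verify() and the verify
        # call must succeed before the row is updated
        return driver_can_verify
    # other target statuses are rejected earlier, at the API layer
    return False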
backup_service=backup_service,
backup_url=backup_url,
backup_hosts=backup_hosts)
+
+ def reset_status(self, ctxt, host, backup_id, status):
+ LOG.debug("reset_status in rpcapi backup_id %(id)s "
+ "on host %(host)s.",
+ {'id': backup_id,
+ 'host': host})
+ cctxt = self.client.prepare(server=host)
+ return cctxt.cast(ctxt, 'reset_status', backup_id=backup_id,
+ status=status)
\ No newline at end of file
message = _("Unsupported backup metadata version requested")
+class BackupVerifyUnsupportedDriver(BackupDriverException):
+ message = _("Unsupported backup verify driver")
+
+
class VolumeMetadataBackupExists(BackupDriverException):
message = _("Metadata backup already exists for this volume")
from oslo.config import cfg
import webob
+from webob import exc
from cinder.api.contrib import admin_actions
from cinder.brick.local_dev import lvm as brick_lvm
resp = req.get_response(app())
return resp
+ def _issue_backup_reset(self, ctx, backup, updated_status):
+ req = webob.Request.blank('/v2/fake/backups/%s/action' % backup['id'])
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps({'os-reset_status': updated_status})
+ req.environ['cinder.context'] = ctx
+ resp = req.get_response(app())
+ return resp
+
def test_valid_updates(self):
vac = admin_actions.VolumeAdminController()
# status is still 'error'
self.assertEqual(volume['status'], 'error')
+ def test_backup_reset_status_as_admin(self):
+ ctx = context.RequestContext('admin', 'fake', True)
+ volume = db.volume_create(ctx, {'status': 'available'})
+ backup = db.backup_create(ctx, {'status': 'available',
+ 'size': 1,
+ 'volume_id': volume['id']})
+
+ resp = self._issue_backup_reset(ctx,
+ backup,
+ {'status': 'error'})
+
+ self.assertEqual(resp.status_int, 202)
+
+ def test_backup_reset_status_as_non_admin(self):
+ ctx = context.RequestContext('fake', 'fake')
+ backup = db.backup_create(ctx, {'status': 'available',
+ 'size': 1,
+ 'volume_id': "fakeid"})
+ resp = self._issue_backup_reset(ctx,
+ backup,
+ {'status': 'error'})
+ # request is not authorized
+ self.assertEqual(resp.status_int, 403)
+
+ def test_backup_reset_status(self):
+ ctx = context.RequestContext('admin', 'fake', True)
+ volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+ 'provider_location': '', 'size': 1})
+ backup = db.backup_create(ctx, {'status': 'available',
+ 'volume_id': volume['id']})
+
+ resp = self._issue_backup_reset(ctx,
+ backup,
+ {'status': 'error'})
+
+ self.assertEqual(resp.status_int, 202)
+
+ def test_invalid_status_for_backup(self):
+ ctx = context.RequestContext('admin', 'fake', True)
+ volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+ 'provider_location': '', 'size': 1})
+ backup = db.backup_create(ctx, {'status': 'available',
+ 'volume_id': volume['id']})
+ resp = self._issue_backup_reset(ctx,
+ backup,
+ {'status': 'restoring'})
+ self.assertEqual(resp.status_int, 400)
+
def test_malformed_reset_status_body(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id)
+
+ def test_backup_reset_valid_updates(self):
+ vac = admin_actions.BackupAdminController()
+ vac.validate_update({'status': 'available'})
+ vac.validate_update({'status': 'error'})
+ self.assertRaises(exc.HTTPBadRequest,
+ vac.validate_update,
+ {'status': 'restoring'})
+ self.assertRaises(exc.HTTPBadRequest,
+ vac.validate_update,
+ {'status': 'creating'})
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
import mock
from oslo.config import cfg
+from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
+from cinder.tests.backup.fake_service_with_verify import \
+ get_backup_driver
CONF = cfg.CONF
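The test module imported above (cinder.tests.backup.fake_service_with_verify) is not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming it builds on the existing fake backup service and the BackupDriverWithVerify base used by the manager, could be:

# Hypothetical sketch of cinder/tests/backup/fake_service_with_verify.py;
# the real module is not part of this excerpt, only the import above is.
from cinder.backup import driver
from cinder.tests.backup import fake_service


class FakeBackupServiceWithVerify(driver.BackupDriverWithVerify,
                                  fake_service.FakeBackupService):
    def verify(self, backup):
        # pretend the backup verified cleanly
        pass


def get_backup_driver(context):
    return FakeBackupServiceWithVerify(context)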
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'error')
+
+ def test_backup_reset_status_from_nonrestoring_to_available(
+ self):
+ vol_id = self._create_volume_db_entry(status='available',
+ size=1)
+ backup_id = self._create_backup_db_entry(status='error',
+ volume_id=vol_id)
+ with mock.patch.object(manager.BackupManager,
+ '_map_service_to_driver') as \
+ mock_map_service_to_driver:
+ mock_map_service_to_driver.return_value = \
+ get_backup_driver(self.ctxt)
+ self.backup_mgr.reset_status(self.ctxt,
+ backup_id,
+ 'available')
+ backup = db.backup_get(self.ctxt, backup_id)
+ self.assertEqual(backup['status'], 'available')
+
+ def test_backup_reset_status_to_available_invalid_backup(self):
+ volume = db.volume_create(self.ctxt, {'status': 'available',
+ 'host': 'test',
+ 'provider_location': '',
+ 'size': 1})
+ backup = db.backup_create(self.ctxt,
+ {'status': 'error',
+ 'service':
+ CONF.backup_driver,
+ 'volume_id': volume['id']})
+
+ backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
+ _mock_backup_verify_class = ('%s.%s.%s' %
+ (backup_driver.__module__,
+ backup_driver.__class__.__name__,
+ 'verify'))
+ with mock.patch(_mock_backup_verify_class) as \
+ _mock_record_verify:
+ _mock_record_verify.side_effect = \
+ exception.BackupVerifyUnsupportedDriver(reason='fake')
+
+ self.assertRaises(exception.BackupVerifyUnsupportedDriver,
+ self.backup_mgr.reset_status,
+ self.ctxt,
+ backup['id'],
+ 'available')
+ backup = db.backup_get(self.ctxt, backup['id'])
+ self.assertEqual(backup['status'], 'error')
+
+ def test_backup_reset_status_from_restoring_to_available(self):
+ volume = db.volume_create(self.ctxt,
+ {'status': 'available',
+ 'host': 'test',
+ 'provider_location': '',
+ 'size': 1})
+ backup = db.backup_create(self.ctxt,
+ {'status': 'restoring',
+ 'service':
+ CONF.backup_driver,
+ 'volume_id': volume['id']})
+
+ self.backup_mgr.reset_status(self.ctxt,
+ backup['id'],
+ 'available')
+ backup = db.backup_get(self.ctxt, backup['id'])
+ self.assertEqual(backup['status'], 'available')
+
+ def test_backup_reset_status_to_error(self):
+ volume = db.volume_create(self.ctxt,
+ {'status': 'available',
+ 'host': 'test',
+ 'provider_location': '',
+ 'size': 1})
+ backup = db.backup_create(self.ctxt,
+ {'status': 'creating',
+ 'service':
+ CONF.backup_driver,
+ 'volume_id': volume['id']})
+ self.backup_mgr.reset_status(self.ctxt,
+ backup['id'],
+ 'error')
+ backup = db.backup_get(self.ctxt, backup['id'])
+ self.assertEqual(backup['status'], 'error')
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",