]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add reset-state function for backups
authorling-yun <zengyunling@huawei.com>
Tue, 26 Aug 2014 10:39:30 +0000 (18:39 +0800)
committerling-yun <zengyunling@huawei.com>
Fri, 12 Sep 2014 07:32:17 +0000 (15:32 +0800)
Since volumes and snapshots already have reset-state functions, backups
need a reset-state function as well.
When creating or restoring a backup, the backup may become stuck in the
'creating' or 'restoring' status if, for example, the database or RabbitMQ
goes down. Currently the only way to resolve these problems is to restart
the cinder-backup service.

This blueprint provides another means for administrators to resolve these
problems: calling the backup reset-state API, instead of directly
restarting the cinder-backup service.

DocImpact: Support reset backup status

blueprint support-reset-state-for-backup

Change-Id: Icd677a0b48083894bcd969d5199fa91b307080de

cinder/api/contrib/admin_actions.py
cinder/api/contrib/backups.py
cinder/backup/api.py
cinder/backup/manager.py
cinder/backup/rpcapi.py
cinder/exception.py
cinder/tests/api/contrib/test_admin_actions.py
cinder/tests/policy.json
cinder/tests/test_backup.py
etc/cinder/policy.json

index bfd6f94d639219ffd10efb5b4e836abe3fa941c2..16fe9902dcd01f20d34b1dc682fc09458db9332f 100644 (file)
@@ -17,6 +17,7 @@ from webob import exc
 
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
+from cinder import backup
 from cinder import db
 from cinder import exception
 from cinder.i18n import _
@@ -47,6 +48,7 @@ class AdminController(wsgi.Controller):
         # singular name of the resource
         self.resource_name = self.collection.rstrip('s')
         self.volume_api = volume.API()
+        self.backup_api = backup.API()
 
     def _update(self, *args, **kwargs):
         raise NotImplementedError()
@@ -254,6 +256,38 @@ class SnapshotAdminController(AdminController):
         return self.volume_api.delete_snapshot(*args, **kwargs)
 
 
+class BackupAdminController(AdminController):
+    """AdminController for Backups."""
+
+    collection = 'backups'
+
+    valid_status = set(['available',
+                        'error'
+                        ])
+
+    @wsgi.action('os-reset_status')
+    def _reset_status(self, req, id, body):
+        """Reset status on the resource."""
+        context = req.environ['cinder.context']
+        self.authorize(context, 'reset_status')
+        update = self.validate_update(body['os-reset_status'])
+        msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
+        LOG.debug(msg, {'resource': self.resource_name, 'id': id,
+                        'update': update})
+
+        notifier_info = {'id': id, 'update': update}
+        notifier = rpc.get_notifier('backupStatusUpdate')
+        notifier.info(context, self.collection + '.reset_status.start',
+                      notifier_info)
+
+        try:
+            self.backup_api.reset_status(context=context, backup_id=id,
+                                         status=update['status'])
+        except exception.NotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+        return webob.Response(status_int=202)
+
+
 class Admin_actions(extensions.ExtensionDescriptor):
     """Enable admin actions."""
 
@@ -264,7 +298,8 @@ class Admin_actions(extensions.ExtensionDescriptor):
 
     def get_controller_extensions(self):
         exts = []
-        for class_ in (VolumeAdminController, SnapshotAdminController):
+        for class_ in (VolumeAdminController, SnapshotAdminController,
+                       BackupAdminController):
             controller = class_()
             extension = extensions.ControllerExtension(
                 self, class_.collection, controller)
index 8ce51dc79d8fa6eedbec82fdbef8070da5ddbbbf..ab11deff7695e7d9d1aad50df3ce9105ec3d5210 100644 (file)
@@ -378,6 +378,7 @@ class Backups(extensions.ExtensionDescriptor):
         res = extensions.ResourceExtension(
             Backups.alias, BackupsController(),
             collection_actions={'detail': 'GET', 'import_record': 'POST'},
-            member_actions={'restore': 'POST', 'export_record': 'GET'})
+            member_actions={'restore': 'POST', 'export_record': 'GET',
+                            'action': 'POST'})
         resources.append(res)
         return resources
index f262ce28603a09bb97c1e3da1bed378b0ff3040e..1d2c4171b3dd64f52cecad576488c722f98c6e47 100644 (file)
@@ -258,6 +258,21 @@ class API(base.Base):
 
         return d
 
+    def reset_status(self, context, backup_id, status):
+        """Make the RPC call to reset a volume backup's status.
+
+        Call backup manager to execute backup status reset operation.
+        :param context: running context
+        :param backup_id: which backup's status to be reset
+        :param status: backup's status to be reset
+        :raises: InvalidBackup
+        """
+        # get backup info
+        backup = self.get(context, backup_id)
+        # send to manager to do reset operation
+        self.backup_rpcapi.reset_status(ctxt=context, host=backup['host'],
+                                        backup_id=backup_id, status=status)
+
     def export_record(self, context, backup_id):
         """Make the RPC call to export a volume backup.
 
index 95adc4d12ef9b55a621414fc31dd95e5d9034ff8..a2f53466a7654868f0cbca0393daa5747efc48bd 100644 (file)
@@ -46,6 +46,7 @@ from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder import quota
+from cinder import rpc
 from cinder import utils
 from cinder.volume import utils as volume_utils
 
@@ -601,3 +602,97 @@ class BackupManager(manager.SchedulerDependentManager):
 
             LOG.info(_('Import record id %s metadata from driver '
                        'finished.') % backup_id)
+
+    def reset_status(self, context, backup_id, status):
+        """Reset volume backup status.
+
+        :param context: running context
+        :param backup_id: The backup id for reset status operation
+        :param status: The status to be set
+        :raises: InvalidBackup
+        :raises: BackupVerifyUnsupportedDriver
+        :raises: AttributeError
+        """
+        LOG.info(_('Reset backup status started, backup_id: '
+                   '%(backup_id)s, status: %(status)s.'),
+                 {'backup_id': backup_id,
+                  'status': status})
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the backup status updated. Fail early since there
+            # are no other status to change but backup's
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_("Backup driver has not been initialized"))
+
+        backup = self.db.backup_get(context, backup_id)
+        backup_service = self._map_service_to_driver(backup['service'])
+        LOG.info(_('Backup service: %s.'), backup_service)
+        if backup_service is not None:
+            configured_service = self.driver_name
+            if backup_service != configured_service:
+                err = _('Reset backup status aborted, the backup service'
+                        ' currently configured [%(configured_service)s] '
+                        'is not the backup service that was used to create'
+                        ' this backup [%(backup_service)s].') % \
+                    {'configured_service': configured_service,
+                     'backup_service': backup_service}
+                raise exception.InvalidBackup(reason=err)
+            # Verify backup
+            try:
+                # check whether the backup is ok or not
+                if status == 'available' and backup['status'] != 'restoring':
+                    # check whether we could verify the backup is ok or not
+                    if isinstance(backup_service,
+                                  driver.BackupDriverWithVerify):
+                        backup_service.verify(backup_id)
+                        self.db.backup_update(context, backup_id,
+                                              {'status': status})
+                    # driver does not support verify function
+                    else:
+                        msg = (_('Backup service %(configured_service)s '
+                                 'does not support verify. Backup id'
+                                 ' %(id)s is not verified. '
+                                 'Skipping verify.') %
+                               {'configured_service': self.driver_name,
+                                'id': backup_id})
+                        raise exception.BackupVerifyUnsupportedDriver(
+                            reason=msg)
+                # reset status to error or from restoring to available
+                else:
+                    if (status == 'error' or
+                        (status == 'available' and
+                            backup['status'] == 'restoring')):
+                        self.db.backup_update(context, backup_id,
+                                              {'status': status})
+            except exception.InvalidBackup:
+                with excutils.save_and_reraise_exception():
+                    msg = (_("Backup id %(id)s is invalid. "
+                             "Skipping reset.") % {'id': backup_id})
+                    LOG.error(msg)
+            except exception.BackupVerifyUnsupportedDriver:
+                with excutils.save_and_reraise_exception():
+                    msg = (_('Backup service %(configured_service)s '
+                             'does not support verify. Backup id'
+                             ' %(id)s is not verified. '
+                             'Skipping verify.') %
+                           {'configured_service': self.driver_name,
+                            'id': backup_id})
+                    LOG.error(msg)
+            except AttributeError:
+                msg = (_('Backup service %(service)s does not support '
+                         'verify. Backup id %(id)s is not verified. '
+                         'Skipping reset.') %
+                       {'service': self.driver_name,
+                        'id': backup_id})
+                LOG.error(msg)
+                raise exception.BackupVerifyUnsupportedDriver(
+                    reason=msg)
+
+            # send notification to ceilometer
+            notifier_info = {'id': backup_id, 'update': {'status': status}}
+            notifier = rpc.get_notifier('backupStatusUpdate')
+            notifier.info(context, "backups" + '.reset_status.end',
+                          notifier_info)
\ No newline at end of file
index 5767adadc2ac8cc57cc0170db114868a55db2549..7258eb4da2d29dd990586216d1ac6c02a797183f 100644 (file)
@@ -88,3 +88,12 @@ class BackupAPI(object):
                    backup_service=backup_service,
                    backup_url=backup_url,
                    backup_hosts=backup_hosts)
+
+    def reset_status(self, ctxt, host, backup_id, status):
+        LOG.debug("reset_status in rpcapi backup_id %(id)s "
+                  "on host %(host)s.",
+                  {'id': backup_id,
+                   'host': host})
+        cctxt = self.client.prepare(server=host)
+        return cctxt.cast(ctxt, 'reset_status', backup_id=backup_id,
+                          status=status)
\ No newline at end of file
index faa1d4978bf0a3dc0511d0597378d9d4c9ac17ba..f53fdceda9a48ebff2ad2a5d021e857756fac008 100644 (file)
@@ -514,6 +514,10 @@ class BackupMetadataUnsupportedVersion(BackupDriverException):
     message = _("Unsupported backup metadata version requested")
 
 
+class BackupVerifyUnsupportedDriver(BackupDriverException):
+    message = _("Unsupported backup verify driver")
+
+
 class VolumeMetadataBackupExists(BackupDriverException):
     message = _("Metadata backup already exists for this volume")
 
index c8c3a259f84417393bda874a1dda40acd162fb34..64963f9e31fd8a6bb9742897936e74c6c74722d1 100644 (file)
@@ -15,6 +15,7 @@ import tempfile
 
 from oslo.config import cfg
 import webob
+from webob import exc
 
 from cinder.api.contrib import admin_actions
 from cinder.brick.local_dev import lvm as brick_lvm
@@ -75,6 +76,16 @@ class AdminActionsTest(test.TestCase):
         resp = req.get_response(app())
         return resp
 
+    def _issue_backup_reset(self, ctx, backup, updated_status):
+        req = webob.Request.blank('/v2/fake/backups/%s/action' % backup['id'])
+        req.method = 'POST'
+        req.headers['content-type'] = 'application/json'
+        req.body = \
+            jsonutils.dumps({'os-reset_status': updated_status})
+        req.environ['cinder.context'] = ctx
+        resp = req.get_response(app())
+        return resp
+
     def test_valid_updates(self):
         vac = admin_actions.VolumeAdminController()
 
@@ -168,6 +179,54 @@ class AdminActionsTest(test.TestCase):
         # status is still 'error'
         self.assertEqual(volume['status'], 'error')
 
+    def test_backup_reset_status_as_admin(self):
+        ctx = context.RequestContext('admin', 'fake', True)
+        volume = db.volume_create(ctx, {'status': 'available'})
+        backup = db.backup_create(ctx, {'status': 'available',
+                                        'size': 1,
+                                        'volume_id': volume['id']})
+
+        resp = self._issue_backup_reset(ctx,
+                                        backup,
+                                        {'status': 'error'})
+
+        self.assertEqual(resp.status_int, 202)
+
+    def test_backup_reset_status_as_non_admin(self):
+        ctx = context.RequestContext('fake', 'fake')
+        backup = db.backup_create(ctx, {'status': 'available',
+                                        'size': 1,
+                                        'volume_id': "fakeid"})
+        resp = self._issue_backup_reset(ctx,
+                                        backup,
+                                        {'status': 'error'})
+        # request is not authorized
+        self.assertEqual(resp.status_int, 403)
+
+    def test_backup_reset_status(self):
+        ctx = context.RequestContext('admin', 'fake', True)
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': '', 'size': 1})
+        backup = db.backup_create(ctx, {'status': 'available',
+                                        'volume_id': volume['id']})
+
+        resp = self._issue_backup_reset(ctx,
+                                        backup,
+                                        {'status': 'error'})
+
+        self.assertEqual(resp.status_int, 202)
+
+    def test_invalid_status_for_backup(self):
+        ctx = context.RequestContext('admin', 'fake', True)
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': '', 'size': 1})
+        backup = db.backup_create(ctx, {'status': 'available',
+                                        'volume_id': volume['id']})
+        resp = self._issue_backup_reset(ctx,
+                                        backup,
+                                        {'status': 'restoring'})
+        self.assertEqual(resp.status_int, 400)
+
     def test_malformed_reset_status_body(self):
         ctx = context.RequestContext('admin', 'fake', True)
         volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
@@ -761,3 +820,14 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
                                                 expected_status, expected_id)
+
+    def test_backup_reset_valid_updates(self):
+        vac = admin_actions.BackupAdminController()
+        vac.validate_update({'status': 'available'})
+        vac.validate_update({'status': 'error'})
+        self.assertRaises(exc.HTTPBadRequest,
+                          vac.validate_update,
+                          {'status': 'restoring'})
+        self.assertRaises(exc.HTTPBadRequest,
+                          vac.validate_update,
+                          {'status': 'creating'})
index f52746929ae16c8425d27c0d54ced79165fa8fa4..a4d29661078c3086a75e279013e049aa7514e91f 100644 (file)
@@ -36,6 +36,7 @@
 
     "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
     "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+    "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
     "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
     "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
     "volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
index 7307c97b846cd3d72acd8daa244707a8b1b4a383..2ebbb1099ada0601a415f37069b7ee65b0761273 100644 (file)
@@ -22,6 +22,7 @@ import tempfile
 import mock
 from oslo.config import cfg
 
+from cinder.backup import manager
 from cinder import context
 from cinder import db
 from cinder import exception
@@ -29,6 +30,8 @@ from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder import test
+from cinder.tests.backup.fake_service_with_verify import\
+    get_backup_driver
 
 
 CONF = cfg.CONF
@@ -599,3 +602,84 @@ class BackupTestCaseWithVerify(BaseBackupTest):
             self.assertTrue(_mock_record_verify.called)
         backup = db.backup_get(self.ctxt, imported_record)
         self.assertEqual(backup['status'], 'error')
+
+    def test_backup_reset_status_from_nonrestoring_to_available(
+            self):
+        vol_id = self._create_volume_db_entry(status='available',
+                                              size=1)
+        backup_id = self._create_backup_db_entry(status='error',
+                                                 volume_id=vol_id)
+        with mock.patch.object(manager.BackupManager,
+                               '_map_service_to_driver') as \
+                mock_map_service_to_driver:
+            mock_map_service_to_driver.return_value = \
+                get_backup_driver(self.ctxt)
+            self.backup_mgr.reset_status(self.ctxt,
+                                         backup_id,
+                                         'available')
+        backup = db.backup_get(self.ctxt, backup_id)
+        self.assertEqual(backup['status'], 'available')
+
+    def test_backup_reset_status_to_available_invalid_backup(self):
+        volume = db.volume_create(self.ctxt, {'status': 'available',
+                                              'host': 'test',
+                                              'provider_location': '',
+                                              'size': 1})
+        backup = db.backup_create(self.ctxt,
+                                  {'status': 'error',
+                                   'service':
+                                   CONF.backup_driver,
+                                   'volume_id': volume['id']})
+
+        backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
+        _mock_backup_verify_class = ('%s.%s.%s' %
+                                     (backup_driver.__module__,
+                                      backup_driver.__class__.__name__,
+                                      'verify'))
+        with mock.patch(_mock_backup_verify_class) as \
+                _mock_record_verify:
+            _mock_record_verify.side_effect = \
+                exception.BackupVerifyUnsupportedDriver(reason='fake')
+
+            self.assertRaises(exception.BackupVerifyUnsupportedDriver,
+                              self.backup_mgr.reset_status,
+                              self.ctxt,
+                              backup['id'],
+                              'available')
+            backup = db.backup_get(self.ctxt, backup['id'])
+            self.assertEqual(backup['status'], 'error')
+
+    def test_backup_reset_status_from_restoring_to_available(self):
+        volume = db.volume_create(self.ctxt,
+                                  {'status': 'available',
+                                   'host': 'test',
+                                   'provider_location': '',
+                                   'size': 1})
+        backup = db.backup_create(self.ctxt,
+                                  {'status': 'restoring',
+                                   'service':
+                                   CONF.backup_driver,
+                                   'volume_id': volume['id']})
+
+        self.backup_mgr.reset_status(self.ctxt,
+                                     backup['id'],
+                                     'available')
+        backup = db.backup_get(self.ctxt, backup['id'])
+        self.assertEqual(backup['status'], 'available')
+
+    def test_backup_reset_status_to_error(self):
+        volume = db.volume_create(self.ctxt,
+                                  {'status': 'available',
+                                   'host': 'test',
+                                   'provider_location': '',
+                                   'size': 1})
+        backup = db.backup_create(self.ctxt,
+                                  {'status': 'creating',
+                                   'service':
+                                   CONF.backup_driver,
+                                   'volume_id': volume['id']})
+        self.backup_mgr.reset_status(self.ctxt,
+                                     backup['id'],
+                                     'error')
+        backup = db.backup_get(self.ctxt, backup['id'])
+        self.assertEqual(backup['status'], 'error')
index bd686fbe7baebdf01348958144aec033f4972cab..d6f7792147ec834d9af938587dedaddf87ae7f92 100644 (file)
@@ -30,6 +30,7 @@
 
     "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
     "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+    "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
     "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
     "volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
     "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",