From: Gorka Eguileor
Date: Sun, 12 Jul 2015 11:30:03 +0000 (+0200)
Subject: Fix backup metadata import missing fields
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=dbdb4cda00c01ed2ae5181e78559777216e0aa5f;p=openstack-build%2Fcinder-build.git

Fix backup metadata import missing fields

When importing backup metadata, only the required fields were imported
and every other field was ignored. This caused serious problems when
importing backups back, since you would lose not only the original
volume ID but also the parent id information needed to restore
incremental backups.

This patch fixes this by importing all fields and creating the backup
record with the right ID.
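For illustration, a minimal sketch of the round trip this patch
restores. The helpers below are simplified stand-ins for
Backup.encode_record/decode_record (the real methods live in
cinder/objects/backup.py), not code from this patch:

    import json

    def encode_record(backup_fields, **extra_info):
        record = dict(extra_info)      # optional driver info first...
        record.update(backup_fields)   # ...so real backup fields win
        return json.dumps(record)

    def decode_record(backup_url):
        return json.loads(backup_url)

    backup = {'id': 'b1', 'parent_id': 'b0', 'volume_id': 'v1', 'size': 1}
    restored = decode_record(encode_record(backup, driver_info='x'))
    # Every field survives the round trip, including the ids that link
    # incremental backups to their parents.
    assert all(restored[k] == v for k, v in backup.items())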
Closes-Bug: #1455043
Closes-Bug: #1476416
Depends-On: Id7ab6e174c1fe85772477f03059c4f457c5c8b17
Change-Id: Ia42ba73e9078d993c63f8e16308151ad11721ea9
---

diff --git a/cinder/backup/api.py b/cinder/backup/api.py
index 11e7f2089..b7ac2d330 100644
--- a/cinder/backup/api.py
+++ b/cinder/backup/api.py
@@ -19,7 +19,6 @@
 Handles all requests relating to the volume backups service.
 """
 
-
 from eventlet import greenthread
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -370,6 +369,68 @@ class API(base.Base):
 
         return export_data
 
+    def _get_import_backup(self, context, backup_url):
+        """Prepare database backup record for import.
+
+        This method decodes the provided backup_url and expects to find the
+        id of the backup in it.
+
+        It then checks the DB for the presence of this backup record; if it
+        exists and is not deleted, an exception is raised because the record
+        cannot be created or reused.
+
+        If the record is in deleted status then we must be trying to recover
+        this record, so we'll reuse it.
+
+        If the record doesn't already exist, we create it with the provided id.
+
+        :param context: running context
+        :param backup_url: backup description to be used by the backup driver
+        :return: BackupImport object
+        :raises: InvalidBackup
+        :raises: InvalidInput
+        """
+        # Deserialize string backup record into a dictionary
+        backup_record = objects.Backup.decode_record(backup_url)
+
+        # ID is a required field since it's what links incremental backups
+        if 'id' not in backup_record:
+            msg = _('Provided backup record is missing an id')
+            raise exception.InvalidInput(reason=msg)
+
+        kwargs = {
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'volume_id': '0000-0000-0000-0000',
+            'status': 'creating',
+        }
+
+        try:
+            # Try to get the backup with that ID in all projects, even among
+            # deleted entries.
+            backup = objects.BackupImport.get_by_id(context,
+                                                    backup_record['id'],
+                                                    read_deleted='yes',
+                                                    project_only=False)
+
+            # If the record exists and it's not deleted we cannot proceed
+            # with the import
+            if backup.status != 'deleted':
+                msg = _('Backup already exists in database.')
+                raise exception.InvalidBackup(reason=msg)
+
+            # Otherwise we'll "revive" the deleted backup record
+            backup.update(kwargs)
+            backup.save()
+
+        except exception.BackupNotFound:
+            # If the record doesn't exist, create it with the specific ID
+            backup = objects.BackupImport(context=context,
+                                          id=backup_record['id'], **kwargs)
+            backup.create()
+
+        return backup
+
     def import_record(self, context, backup_service, backup_url):
         """Make the RPC call to import a volume backup.
 
@@ -378,6 +439,7 @@ class API(base.Base):
         :param backup_url: backup description to be used by the backup driver
         :raises: InvalidBackup
         :raises: ServiceNotFound
+        :raises: InvalidInput
         """
         check_policy(context, 'backup-import')
 
@@ -391,14 +453,9 @@ class API(base.Base):
         if len(hosts) == 0:
             raise exception.ServiceNotFound(service_id=backup_service)
 
-        kwargs = {
-            'user_id': context.user_id,
-            'project_id': context.project_id,
-            'volume_id': '0000-0000-0000-0000',
-            'status': 'creating',
-        }
-        backup = objects.Backup(context=context, **kwargs)
-        backup.create()
+        # Get Backup object that will be used to import this backup record
+        backup = self._get_import_backup(context, backup_url)
+
         first_host = hosts.pop()
         self.backup_rpcapi.import_record(context,
                                          first_host,
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 670273fb7..2e3e19458 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -661,28 +661,48 @@ class BackupManager(manager.SchedulerDependentManager):
                 self._update_backup_error(backup, context, msg)
                 raise exception.InvalidBackup(reason=msg)
 
-            required_import_options = ['display_name',
-                                       'display_description',
-                                       'container',
-                                       'size',
-                                       'service_metadata',
-                                       'service',
-                                       'object_count']
-
-            backup_update = {}
-            backup_update['status'] = 'available'
-            backup_update['service'] = self.driver_name
-            backup_update['availability_zone'] = self.az
-            backup_update['host'] = self.host
-            for entry in required_import_options:
-                if entry not in backup_options:
-                    msg = (_('Backup metadata received from driver for '
-                             'import is missing %s.'), entry)
-                    self._update_backup_error(backup, context, msg)
-                    raise exception.InvalidBackup(reason=msg)
-                backup_update[entry] = backup_options[entry]
+            required_import_options = {
+                'display_name',
+                'display_description',
+                'container',
+                'size',
+                'service_metadata',
+                'service',
+                'object_count',
+                'id',
+            }
+
+            # Check for missing fields in imported data
+            missing_opts = required_import_options - set(backup_options)
+            if missing_opts:
+                msg = (_('Driver successfully decoded imported backup data, '
+                         'but there are missing fields (%s).') %
+                       ', '.join(missing_opts))
+                self._update_backup_error(backup, context, msg)
+                raise exception.InvalidBackup(reason=msg)
+
+            # Confirm the ID from the record in the DB is the right one
+            backup_id = backup_options['id']
+            if backup_id != backup.id:
+                msg = (_('Trying to import backup metadata from id '
+                         '%(meta_id)s into backup %(id)s.') %
+                       {'meta_id': backup_id, 'id': backup.id})
+                self._update_backup_error(backup, context, msg)
+                raise exception.InvalidBackup(reason=msg)
+
+            # Overwrite some fields
+            backup_options['status'] = 'available'
+            backup_options['service'] = self.driver_name
+            backup_options['availability_zone'] = self.az
+            backup_options['host'] = self.host
+
+            # Remove some values which are not actual fields and some that
+            # were set by the API node
+            for key in ('name', 'user_id', 'project_id'):
+                backup_options.pop(key, None)
+
             # Update the database
-            backup.update(backup_update)
+            backup.update(backup_options)
             backup.save()
 
             # Verify backup
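As a plain-Python illustration of the set-difference validation added
above (names simplified, not part of the patch), the check reports every
missing field at once instead of failing on the first one:

    # Mirrors required_import_options from manager.py; the rest is
    # simplified for the example.
    REQUIRED = {'display_name', 'display_description', 'container', 'size',
                'service_metadata', 'service', 'object_count', 'id'}

    def check_import_options(backup_options):
        missing = REQUIRED - set(backup_options)
        if missing:
            raise ValueError('missing fields (%s)' % ', '.join(sorted(missing)))

    check_import_options(dict.fromkeys(REQUIRED, 'x'))  # passes
    # check_import_options({'id': 'b1'}) would raise, naming all 7 others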
diff --git a/cinder/db/api.py b/cinder/db/api.py
index abf370f41..c3af425e0 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -848,9 +848,9 @@ def reservation_expire(context):
 ###################
 
 
-def backup_get(context, backup_id):
+def backup_get(context, backup_id, read_deleted=None, project_only=True):
     """Get a backup or raise if it does not exist."""
-    return IMPL.backup_get(context, backup_id)
+    return IMPL.backup_get(context, backup_id, read_deleted, project_only)
 
 
 def backup_get_all(context, filters=None, marker=None, limit=None,
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index 2496823a8..4a5d5ae2a 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -3420,13 +3420,19 @@ def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
 
 
 @require_context
-def backup_get(context, backup_id):
-    return _backup_get(context, backup_id)
+def backup_get(context, backup_id, read_deleted=None, project_only=True):
+    return _backup_get(context, backup_id,
+                       read_deleted=read_deleted,
+                       project_only=project_only)
 
 
-def _backup_get(context, backup_id, session=None):
+def _backup_get(context, backup_id, session=None, read_deleted=None,
+                project_only=True):
     result = model_query(context, models.Backup, session=session,
-                         project_only=True).filter_by(id=backup_id).first()
+                         project_only=project_only,
+                         read_deleted=read_deleted).\
+        filter_by(id=backup_id).\
+        first()
 
     if not result:
         raise exception.BackupNotFound(backup_id=backup_id)
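For reference, a rough sketch of what the two new query parameters mean,
using plain lists instead of the real model_query/SQLAlchemy machinery
(simplified; the actual helper lives in cinder/db/sqlalchemy/api.py and
handles more cases):

    def apply_scope(rows, context, read_deleted=None, project_only=True):
        if read_deleted in (None, 'no'):
            rows = [r for r in rows if not r['deleted']]
        elif read_deleted == 'only':
            rows = [r for r in rows if r['deleted']]
        # read_deleted == 'yes' keeps both live and soft-deleted rows
        if project_only:
            rows = [r for r in rows if r['project_id'] == context['project_id']]
        return rows

    rows = [{'id': 'b1', 'deleted': True, 'project_id': 'p2'}]
    ctx = {'project_id': 'p1'}
    # Import lookups pass read_deleted='yes', project_only=False, so the
    # deleted record from another project is still found and can be revived.
    assert apply_scope(rows, ctx, read_deleted='yes', project_only=False) == rows
    assert apply_scope(rows, ctx) == []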
+ """ + + @base.remotable + def create(self): + updates = self.cinder_obj_get_changes() + + db_backup = db.backup_create(self._context, updates) + self._from_db_object(self._context, self, db_backup) diff --git a/cinder/tests/unit/api/contrib/test_backups.py b/cinder/tests/unit/api/contrib/test_backups.py index c0827c3ff..863987aa3 100644 --- a/cinder/tests/unit/api/contrib/test_backups.py +++ b/cinder/tests/unit/api/contrib/test_backups.py @@ -30,6 +30,7 @@ from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ +from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import utils @@ -1599,20 +1600,57 @@ class BackupsAPITestCase(test.TestCase): def test_import_record_volume_id_specified_json(self, _mock_import_record_rpc, _mock_list_services): + utils.replace_obj_loader(self, objects.Backup) + project_id = 'fake' + backup_service = 'fake' + ctx = context.RequestContext('admin', project_id, is_admin=True) + backup = objects.Backup(ctx, id='id', user_id='user_id', + project_id=project_id, status='available') + backup_url = backup.encode_record() + _mock_import_record_rpc.return_value = None + _mock_list_services.return_value = [backup_service] + + req = webob.Request.blank('/v2/fake/backups/import_record') + body = {'backup-record': {'backup_service': backup_service, + 'backup_url': backup_url}} + req.body = json.dumps(body) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) + res_dict = json.loads(res.body) + + # verify that request is successful + self.assertEqual(201, res.status_int) + self.assertIn('id', res_dict['backup']) + self.assertEqual('id', res_dict['backup']['id']) + + # Verify that entry in DB is as expected + db_backup = objects.Backup.get_by_id(ctx, 'id') + self.assertEqual(ctx.project_id, db_backup.project_id) + self.assertEqual(ctx.user_id, db_backup.user_id) + self.assertEqual('0000-0000-0000-0000', db_backup.volume_id) + self.assertEqual('creating', db_backup.status) + + @mock.patch('cinder.backup.api.API._list_backup_services') + @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') + def test_import_record_volume_id_exists_deleted(self, + _mock_import_record_rpc, + _mock_list_services): ctx = context.RequestContext('admin', 'fake', is_admin=True) + utils.replace_obj_loader(self, objects.Backup) + + # Original backup belonged to a different user_id and project_id + backup = objects.Backup(ctx, id='id', user_id='original_user_id', + project_id='original_project_id', + status='available') + backup_url = backup.encode_record() + + # Deleted DB entry has project_id and user_id set to fake + backup_id = self._create_backup('id', status='deleted') backup_service = 'fake' - backup_url = 'fake' - _mock_import_record_rpc.return_value = \ - {'display_name': 'fake', - 'display_description': 'fake', - 'container': 'fake', - 'size': 1, - 'service_metadata': 'fake', - 'service': 'fake', - 'object_count': 1, - 'status': 'available', - 'availability_zone': 'fake'} - _mock_list_services.return_value = ['fake'] + _mock_import_record_rpc.return_value = None + _mock_list_services.return_value = [backup_service] req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, @@ -1623,29 +1661,35 @@ class BackupsAPITestCase(test.TestCase): res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = json.loads(res.body) + # 
diff --git a/cinder/tests/unit/api/contrib/test_backups.py b/cinder/tests/unit/api/contrib/test_backups.py
index c0827c3ff..863987aa3 100644
--- a/cinder/tests/unit/api/contrib/test_backups.py
+++ b/cinder/tests/unit/api/contrib/test_backups.py
@@ -30,6 +30,7 @@ from cinder import context
 from cinder import db
 from cinder import exception
 from cinder.i18n import _
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit import utils
@@ -1599,20 +1600,57 @@ class BackupsAPITestCase(test.TestCase):
     def test_import_record_volume_id_specified_json(self,
                                                     _mock_import_record_rpc,
                                                     _mock_list_services):
+        utils.replace_obj_loader(self, objects.Backup)
+        project_id = 'fake'
+        backup_service = 'fake'
+        ctx = context.RequestContext('admin', project_id, is_admin=True)
+        backup = objects.Backup(ctx, id='id', user_id='user_id',
+                                project_id=project_id, status='available')
+        backup_url = backup.encode_record()
+        _mock_import_record_rpc.return_value = None
+        _mock_list_services.return_value = [backup_service]
+
+        req = webob.Request.blank('/v2/fake/backups/import_record')
+        body = {'backup-record': {'backup_service': backup_service,
+                                  'backup_url': backup_url}}
+        req.body = json.dumps(body)
+        req.method = 'POST'
+        req.headers['content-type'] = 'application/json'
+
+        res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
+        res_dict = json.loads(res.body)
+
+        # verify that request is successful
+        self.assertEqual(201, res.status_int)
+        self.assertIn('id', res_dict['backup'])
+        self.assertEqual('id', res_dict['backup']['id'])
+
+        # Verify that entry in DB is as expected
+        db_backup = objects.Backup.get_by_id(ctx, 'id')
+        self.assertEqual(ctx.project_id, db_backup.project_id)
+        self.assertEqual(ctx.user_id, db_backup.user_id)
+        self.assertEqual('0000-0000-0000-0000', db_backup.volume_id)
+        self.assertEqual('creating', db_backup.status)
+
+    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
+    def test_import_record_volume_id_exists_deleted(self,
+                                                    _mock_import_record_rpc,
+                                                    _mock_list_services):
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
+        utils.replace_obj_loader(self, objects.Backup)
+
+        # Original backup belonged to a different user_id and project_id
+        backup = objects.Backup(ctx, id='id', user_id='original_user_id',
+                                project_id='original_project_id',
+                                status='available')
+        backup_url = backup.encode_record()
+
+        # Deleted DB entry has project_id and user_id set to fake
+        backup_id = self._create_backup('id', status='deleted')
         backup_service = 'fake'
-        backup_url = 'fake'
-        _mock_import_record_rpc.return_value = \
-            {'display_name': 'fake',
-             'display_description': 'fake',
-             'container': 'fake',
-             'size': 1,
-             'service_metadata': 'fake',
-             'service': 'fake',
-             'object_count': 1,
-             'status': 'available',
-             'availability_zone': 'fake'}
-        _mock_list_services.return_value = ['fake']
+        _mock_import_record_rpc.return_value = None
+        _mock_list_services.return_value = [backup_service]
 
         req = webob.Request.blank('/v2/fake/backups/import_record')
         body = {'backup-record': {'backup_service': backup_service,
@@ -1623,29 +1661,35 @@ class BackupsAPITestCase(test.TestCase):
         res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
         res_dict = json.loads(res.body)
 
+        # verify that request is successful
         self.assertEqual(201, res.status_int)
-        self.assertTrue('id' in res_dict['backup'])
+        self.assertIn('id', res_dict['backup'])
+        self.assertEqual('id', res_dict['backup']['id'])
+
+        # Verify that entry in DB is as expected, with new project and user_id
+        db_backup = objects.Backup.get_by_id(ctx, 'id')
+        self.assertEqual(ctx.project_id, db_backup.project_id)
+        self.assertEqual(ctx.user_id, db_backup.user_id)
+        self.assertEqual('0000-0000-0000-0000', db_backup.volume_id)
+        self.assertEqual('creating', db_backup.status)
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
 
     @mock.patch('cinder.backup.api.API._list_backup_services')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
     def test_import_record_volume_id_specified_xml(self,
                                                    _mock_import_record_rpc,
                                                    _mock_list_services):
-        ctx = context.RequestContext('admin', 'fake', is_admin=True)
+        utils.replace_obj_loader(self, objects.Backup)
+        project_id = 'fake'
         backup_service = 'fake'
-        backup_url = 'fake'
-        _mock_import_record_rpc.return_value = \
-            {'display_name': 'fake',
-             'display_description': 'fake',
-             'container': 'fake',
-             'size': 1,
-             'service_metadata': 'fake',
-             'service': 'fake',
-             'object_count': 1,
-             'status': 'available',
-             'availability_zone': 'fake'}
-        _mock_list_services.return_value = ['fake']
+        ctx = context.RequestContext('admin', project_id, is_admin=True)
+        backup = objects.Backup(ctx, id='id', user_id='user_id',
+                                project_id=project_id, status='available')
+        backup_url = backup.encode_record()
+        _mock_import_record_rpc.return_value = None
+        _mock_list_services.return_value = [backup_service]
 
         req = webob.Request.blank('/v2/fake/backups/import_record')
         req.body = ('