From: Xing Yang Date: Sat, 10 Oct 2015 01:57:18 +0000 (-0400) Subject: Backup snapshots X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=dbc345729e84ee33054c4662d3d214215006226b;p=openstack-build%2Fcinder-build.git Backup snapshots Today we can back up a volume, but not a snapshot. This patch adds support for backing up snapshots, providing another layer of data protection for the user. DocImpact implements blueprint backup-snapshots Change-Id: Ib4ab9ca9dc72b30151154f3f96037f9ce3c9c540 --- diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py index 4cd9e2fa2..dcbf80778 100644 --- a/cinder/api/contrib/backups.py +++ b/cinder/api/contrib/backups.py @@ -263,6 +263,7 @@ class BackupsController(wsgi.Controller): description = backup.get('description', None) incremental = backup.get('incremental', False) force = backup.get('force', False) + snapshot_id = backup.get('snapshot_id', None) LOG.info(_LI("Creating backup of volume %(volume_id)s in container" " %(container)s"), {'volume_id': volume_id, 'container': container}, @@ -271,7 +272,8 @@ class BackupsController(wsgi.Controller): try: new_backup = self.backup_api.create(context, name, description, volume_id, container, - incremental, None, force) + incremental, None, force, + snapshot_id) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.VolumeNotFound as error: diff --git a/cinder/api/views/backups.py b/cinder/api/views/backups.py index 3917e2c67..7c84cc34a 100644 --- a/cinder/api/views/backups.py +++ b/cinder/api/views/backups.py @@ -78,6 +78,8 @@ class ViewBuilder(common.ViewBuilder): 'links': self._get_links(request, backup['id']), 'is_incremental': backup.is_incremental, 'has_dependent_backups': backup.has_dependent_backups, + 'snapshot_id': backup.snapshot_id, + 'data_timestamp': backup.data_timestamp, } } diff --git a/cinder/backup/api.py b/cinder/backup/api.py index 56d7e0b7c..a5ef98a28 100644 --- a/cinder/backup/api.py +++ b/cinder/backup/api.py @@ -19,11 +19,14 @@ Handles all requests relating to the volume backups service.
""" +from datetime import datetime + from eventlet import greenthread from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils +from pytz import timezone from cinder.backup import rpcapi as backup_rpcapi from cinder import context @@ -150,20 +153,28 @@ class API(base.Base): def create(self, context, name, description, volume_id, container, incremental=False, availability_zone=None, - force=False): + force=False, snapshot_id=None): """Make the RPC call to create a volume backup.""" check_policy(context, 'create') volume = self.volume_api.get(context, volume_id) + snapshot = None + if snapshot_id: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) if volume['status'] not in ["available", "in-use"]: msg = (_('Volume to be backed up must be available ' 'or in-use, but the current status is "%s".') % volume['status']) raise exception.InvalidVolume(reason=msg) - elif volume['status'] in ["in-use"] and not force: + elif volume['status'] in ["in-use"] and not snapshot_id and not force: msg = _('Backing up an in-use volume must use ' 'the force flag.') raise exception.InvalidVolume(reason=msg) + elif snapshot_id and snapshot['status'] not in ["available"]: + msg = (_('Snapshot to be backed up must be available, ' + 'but the current status is "%s".') + % snapshot['status']) + raise exception.InvalidSnapshot(reason=msg) previous_status = volume['status'] volume_host = volume_utils.extract_host(volume['host'], 'host') @@ -208,15 +219,36 @@ class API(base.Base): raise exception.BackupLimitExceeded( allowed=quotas[over]) - # Find the latest backup of the volume and use it as the parent - # backup to do an incremental backup. + # Find the latest backup and use it as the parent backup to do an + # incremental backup. latest_backup = None if incremental: backups = objects.BackupList.get_all_by_volume(context.elevated(), volume_id) if backups.objects: - latest_backup = max(backups.objects, - key=lambda x: x['created_at']) + # NOTE(xyang): The 'data_timestamp' field records the time + # when the data on the volume was first saved. If it is + # a backup from volume, 'data_timestamp' will be the same + # as 'created_at' for a backup. If it is a backup from a + # snapshot, 'data_timestamp' will be the same as + # 'created_at' for a snapshot. + # If not backing up from snapshot, the backup with the latest + # 'data_timestamp' will be the parent; If backing up from + # snapshot, the backup with the latest 'data_timestamp' will + # be chosen only if 'data_timestamp' is earlier than the + # 'created_at' timestamp of the snapshot; Otherwise, the + # backup will not be chosen as the parent. + # For example, a volume has a backup taken at 8:00, then + # a snapshot taken at 8:10, and then a backup at 8:20. + # When taking an incremental backup of the snapshot, the + # parent should be the backup at 8:00, not 8:20, and the + # 'data_timestamp' of this new backup will be 8:10. 
+ latest_backup = max( + backups.objects, + key=lambda x: x['data_timestamp'] + if (not snapshot or (snapshot and x['data_timestamp'] + < snapshot['created_at'])) + else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC'))) else: msg = _('No backups available to do an incremental backup.') raise exception.InvalidBackup(reason=msg) @@ -229,6 +261,11 @@ class API(base.Base): 'incremental backup.') raise exception.InvalidBackup(reason=msg) + data_timestamp = None + if snapshot_id: + snapshot = objects.Snapshot.get_by_id(context, snapshot_id) + data_timestamp = snapshot.created_at + self.db.volume_update(context, volume_id, {'status': 'backing-up', 'previous_status': previous_status}) @@ -244,9 +281,14 @@ class API(base.Base): 'parent_id': parent_id, 'size': volume['size'], 'host': volume_host, + 'snapshot_id': snapshot_id, + 'data_timestamp': data_timestamp, } backup = objects.Backup(context=context, **kwargs) backup.create() + if not snapshot_id: + backup.data_timestamp = backup.created_at + backup.save() QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py new file mode 100644 index 000000000..5b242d7ac --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py @@ -0,0 +1,40 @@ +# Copyright (c) 2015 EMC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, DateTime, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + backups = Table('backups', meta, autoload=True) + snapshot_id = Column('snapshot_id', String(length=36)) + data_timestamp = Column('data_timestamp', DateTime) + + backups.create_column(snapshot_id) + backups.update().values(snapshot_id=None).execute() + + backups.create_column(data_timestamp) + backups.update().values(data_timestamp=None).execute() + + # Copy existing created_at timestamp to data_timestamp + # in the backups table. 
+ backups_list = list(backups.select().execute()) + for backup in backups_list: + backup_id = backup.id + backups.update().\ + where(backups.c.id == backup_id).\ + values(data_timestamp=backup.created_at).execute() diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py index ee9b63872..282b52e7e 100644 --- a/cinder/db/sqlalchemy/models.py +++ b/cinder/db/sqlalchemy/models.py @@ -541,6 +541,8 @@ class Backup(BASE, CinderBase): temp_volume_id = Column(String(36)) temp_snapshot_id = Column(String(36)) num_dependent_backups = Column(Integer) + snapshot_id = Column(String(36)) + data_timestamp = Column(DateTime) @validates('fail_reason') def validate_fail_reason(self, key, fail_reason): diff --git a/cinder/objects/backup.py b/cinder/objects/backup.py index 00ebb1db7..33e074e0c 100644 --- a/cinder/objects/backup.py +++ b/cinder/objects/backup.py @@ -38,7 +38,8 @@ class Backup(base.CinderPersistentObject, base.CinderObject, # Version 1.0: Initial version # Version 1.1: Add new field num_dependent_backups and extra fields # is_incremental and has_dependent_backups. - VERSION = '1.1' + # Version 1.2: Add new fields snapshot_id and data_timestamp. + VERSION = '1.2' fields = { 'id': fields.UUIDField(), @@ -68,6 +69,8 @@ class Backup(base.CinderPersistentObject, base.CinderObject, 'temp_volume_id': fields.StringField(nullable=True), 'temp_snapshot_id': fields.StringField(nullable=True), 'num_dependent_backups': fields.IntegerField(), + 'snapshot_id': fields.StringField(nullable=True), + 'data_timestamp': fields.DateTimeField(nullable=True), } obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups'] diff --git a/cinder/tests/unit/api/contrib/test_backups.py b/cinder/tests/unit/api/contrib/test_backups.py index 7832036da..65aa45a07 100644 --- a/cinder/tests/unit/api/contrib/test_backups.py +++ b/cinder/tests/unit/api/contrib/test_backups.py @@ -20,6 +20,7 @@ Tests for Backup code.
import json from xml.dom import minidom +import ddt import mock from oslo_utils import timeutils import webob @@ -37,7 +38,10 @@ from cinder.tests.unit import utils # needed for stubs to work import cinder.volume +NUM_ELEMENTS_IN_BACKUP = 17 + +@ddt.ddt class BackupsAPITestCase(test.TestCase): """Test Case for backups API.""" @@ -55,11 +59,12 @@ class BackupsAPITestCase(test.TestCase): display_description='this is a test backup', container='volumebackups', status='creating', - snapshot=False, incremental=False, parent_id=None, size=0, object_count=0, host='testhost', - num_dependent_backups=0): + num_dependent_backups=0, + snapshot_id=None, + data_timestamp=None): """Create a backup object.""" backup = {} backup['volume_id'] = volume_id @@ -74,21 +79,35 @@ class BackupsAPITestCase(test.TestCase): backup['fail_reason'] = '' backup['size'] = size backup['object_count'] = object_count - backup['snapshot'] = snapshot backup['incremental'] = incremental backup['parent_id'] = parent_id backup['num_dependent_backups'] = num_dependent_backups - return db.backup_create(context.get_admin_context(), backup)['id'] + backup['snapshot_id'] = snapshot_id + backup['data_timestamp'] = data_timestamp + backup = db.backup_create(context.get_admin_context(), backup) + if not snapshot_id: + db.backup_update(context.get_admin_context(), + backup['id'], + {'data_timestamp': backup['created_at']}) + return backup['id'] @staticmethod def _get_backup_attrib(backup_id, attrib_name): return db.backup_get(context.get_admin_context(), backup_id)[attrib_name] - def test_show_backup(self): + @ddt.data(False, True) + def test_show_backup(self, backup_from_snapshot): volume_id = utils.create_volume(self.context, size=5, status='creating')['id'] - backup_id = self._create_backup(volume_id) + snapshot = None + snapshot_id = None + if backup_from_snapshot: + snapshot = utils.create_snapshot(self.context, + volume_id) + snapshot_id = snapshot.id + backup_id = self._create_backup(volume_id, + snapshot_id=snapshot_id) req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) req.method = 'GET' @@ -109,8 +128,11 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(volume_id, res_dict['backup']['volume_id']) self.assertFalse(res_dict['backup']['is_incremental']) self.assertFalse(res_dict['backup']['has_dependent_backups']) + self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) self.assertIn('updated_at', res_dict['backup']) + if snapshot: + snapshot.destroy() db.backup_destroy(context.get_admin_context(), backup_id) db.volume_destroy(context.get_admin_context(), volume_id) @@ -283,7 +305,7 @@ class BackupsAPITestCase(test.TestCase): res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) - self.assertEqual(15, len(res_dict['backups'][0])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual('az1', res_dict['backups'][0]['availability_zone']) self.assertEqual('volumebackups', res_dict['backups'][0]['container']) @@ -298,7 +320,7 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual('1', res_dict['backups'][0]['volume_id']) self.assertIn('updated_at', res_dict['backups'][0]) - self.assertEqual(15, len(res_dict['backups'][1])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual('az1', res_dict['backups'][1]['availability_zone']) self.assertEqual('volumebackups', res_dict['backups'][1]['container']) @@ -313,7 +335,7 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual('1', res_dict['backups'][1]['volume_id']) 
self.assertIn('updated_at', res_dict['backups'][1]) - self.assertEqual(15, len(res_dict['backups'][2])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][2])) self.assertEqual('az1', res_dict['backups'][2]['availability_zone']) self.assertEqual('volumebackups', res_dict['backups'][2]['container']) self.assertEqual('this is a test backup', @@ -469,9 +491,9 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(2, len(res_dict['backups'])) - self.assertEqual(15, len(res_dict['backups'][0])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(backup_id3, res_dict['backups'][0]['id']) - self.assertEqual(15, len(res_dict['backups'][1])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual(backup_id2, res_dict['backups'][1]['id']) db.backup_destroy(context.get_admin_context(), backup_id3) @@ -492,9 +514,9 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(2, len(res_dict['backups'])) - self.assertEqual(15, len(res_dict['backups'][0])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(backup_id2, res_dict['backups'][0]['id']) - self.assertEqual(15, len(res_dict['backups'][1])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual(backup_id1, res_dict['backups'][1]['id']) db.backup_destroy(context.get_admin_context(), backup_id3) @@ -515,7 +537,7 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(1, len(res_dict['backups'])) - self.assertEqual(15, len(res_dict['backups'][0])) + self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(backup_id2, res_dict['backups'][0]['id']) db.backup_destroy(context.get_admin_context(), backup_id3) @@ -683,14 +705,22 @@ class BackupsAPITestCase(test.TestCase): @mock.patch('cinder.db.service_get_all_by_topic') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_backup_delta(self, mock_validate, + @ddt.data(False, True) + def test_create_backup_delta(self, backup_from_snapshot, + mock_validate, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': "fake_az", 'host': 'test_host', 'disabled': 0, 'updated_at': timeutils.utcnow()}] volume_id = utils.create_volume(self.context, size=5)['id'] - + snapshot = None + snapshot_id = None + if backup_from_snapshot: + snapshot = utils.create_snapshot(self.context, + volume_id, + status='available') + snapshot_id = snapshot.id backup_id = self._create_backup(volume_id, status="available") body = {"backup": {"display_name": "nightly001", "display_description": @@ -698,6 +728,7 @@ class BackupsAPITestCase(test.TestCase): "volume_id": volume_id, "container": "nightlybackups", "incremental": True, + "snapshot_id": snapshot_id, } } req = webob.Request.blank('/v2/fake/backups') @@ -713,6 +744,8 @@ class BackupsAPITestCase(test.TestCase): self.assertTrue(mock_validate.called) db.backup_destroy(context.get_admin_context(), backup_id) + if snapshot: + snapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) @mock.patch('cinder.db.service_get_all_by_topic') @@ -1932,7 +1965,8 @@ class BackupsAPITestCase(test.TestCase): self.assertRaises(exception.NotSupportedOperation, self.backup_api.delete, self.context, backup, True) - def test_show_incremental_backup(self): + @ddt.data(False, True) + def test_show_incremental_backup(self, backup_from_snapshot): volume_id = utils.create_volume(self.context, size=5)['id'] parent_backup_id = self._create_backup(volume_id, status="available", num_dependent_backups=1) @@ -1940,9 +1974,16 @@ class BackupsAPITestCase(test.TestCase): incremental=True, parent_id=parent_backup_id, num_dependent_backups=1) + snapshot = None + snapshot_id = None + if backup_from_snapshot: + snapshot = utils.create_snapshot(self.context, + volume_id) + snapshot_id = snapshot.id child_backup_id = self._create_backup(volume_id, status="available", incremental=True, - parent_id=backup_id) + parent_id=backup_id, + snapshot_id=snapshot_id) req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) @@ -1954,6 +1995,7 @@ self.assertEqual(200, res.status_int) self.assertTrue(res_dict['backup']['is_incremental']) self.assertTrue(res_dict['backup']['has_dependent_backups']) + self.assertIsNone(res_dict['backup']['snapshot_id']) req = webob.Request.blank('/v2/fake/backups/%s' % parent_backup_id) @@ -1965,6 +2007,7 @@ self.assertEqual(200, res.status_int) self.assertFalse(res_dict['backup']['is_incremental']) self.assertTrue(res_dict['backup']['has_dependent_backups']) + self.assertIsNone(res_dict['backup']['snapshot_id']) req = webob.Request.blank('/v2/fake/backups/%s' % child_backup_id) @@ -1976,7 +2019,11 @@ self.assertEqual(200, res.status_int) self.assertTrue(res_dict['backup']['is_incremental']) self.assertFalse(res_dict['backup']['has_dependent_backups']) + self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) db.backup_destroy(context.get_admin_context(), child_backup_id) db.backup_destroy(context.get_admin_context(), backup_id) + db.backup_destroy(context.get_admin_context(), parent_backup_id) + if snapshot: + snapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) diff --git a/cinder/tests/unit/objects/test_backup.py b/cinder/tests/unit/objects/test_backup.py index b1e13720e..5c2828c01 100644 --- a/cinder/tests/unit/objects/test_backup.py +++ b/cinder/tests/unit/objects/test_backup.py @@ -33,6 +33,8 @@ fake_backup = { 'project_id': 'fake_project', 'temp_volume_id': None, 'temp_snapshot_id': None, + 'snapshot_id': None, + 'data_timestamp': None, } @@ -85,6 +87,11 @@ class TestBackup(test_objects.BaseObjectsTestCase): self.assertEqual('2', backup.temp_volume_id) self.assertEqual('3', backup.temp_snapshot_id) + def test_obj_field_snapshot_id(self): + backup = objects.Backup(context=self.context, + snapshot_id='2') + self.assertEqual('2', backup.snapshot_id) + def test_import_record(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=1, parent_id=None, diff --git a/cinder/tests/unit/objects/test_objects.py b/cinder/tests/unit/objects/test_objects.py index b70783479..1348cb633 100644 --- a/cinder/tests/unit/objects/test_objects.py +++ b/cinder/tests/unit/objects/test_objects.py @@ -21,8 +21,8 @@ from cinder import test # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects.
object_data = { - 'Backup': '1.1-cd077ec037f5ad1f5409fd660bd59f53', - 'BackupImport': '1.1-cd077ec037f5ad1f5409fd660bd59f53', + 'Backup': '1.2-62c3da6df3dccb76796e4da65a45a44f', + 'BackupImport': '1.2-62c3da6df3dccb76796e4da65a45a44f', 'BackupList': '1.0-24591dabe26d920ce0756fe64cd5f3aa', 'CGSnapshot': '1.0-190da2a2aa9457edc771d888f7d225c4', 'CGSnapshotList': '1.0-e8c3f4078cd0ee23487b34d173eec776', diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py index c826e2860..c9eb9a24c 100644 --- a/cinder/tests/unit/test_db_api.py +++ b/cinder/tests/unit/test_db_api.py @@ -1901,7 +1901,8 @@ class DBAPIBackupTestCase(BaseTest): """Tests for db.api.backup_* methods.""" - _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at'] + _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', + 'updated_at', 'data_timestamp'] def setUp(self): super(DBAPIBackupTestCase, self).setUp() @@ -1927,7 +1928,8 @@ class DBAPIBackupTestCase(BaseTest): 'object_count': 100, 'temp_volume_id': 'temp_volume_id', 'temp_snapshot_id': 'temp_snapshot_id', - 'num_dependent_backups': 0, } + 'num_dependent_backups': 0, + 'snapshot_id': 'snapshot_id', } if one: return base_values diff --git a/cinder/tests/unit/test_migrations.py b/cinder/tests/unit/test_migrations.py index 75df2424f..c964d1629 100644 --- a/cinder/tests/unit/test_migrations.py +++ b/cinder/tests/unit/test_migrations.py @@ -710,6 +710,13 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin): self.assertIsInstance(private_data.c.last_used.type, self.TIME_TYPE) + def _check_061(self, engine, data): + backups = db_utils.get_table(engine, 'backups') + self.assertIsInstance(backups.c.snapshot_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.data_timestamp.type, + self.TIME_TYPE) + def test_walk_versions(self): self.walk_versions(False, False) diff --git a/cinder/tests/unit/test_volume_utils.py b/cinder/tests/unit/test_volume_utils.py index 1329ee662..d5158dbc2 100644 --- a/cinder/tests/unit/test_volume_utils.py +++ b/cinder/tests/unit/test_volume_utils.py @@ -385,6 +385,7 @@ class NotifyUsageTestCase(test.TestCase): 'fail_reason': None, 'parent_id': 'fake_parent_id', 'num_dependent_backups': 0, + 'snapshot_id': None, } # Make it easier to find out differences between raw and expected. 
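The parent-selection rule documented in the NOTE in cinder/backup/api.py above is easier to see worked through. Below is a minimal standalone sketch of the same max()-with-sentinel idiom, slightly simplified: the dict records, IDs, and naive datetimes are illustrative stand-ins, not Cinder objects (the patch itself compares tz-aware timestamps and uses a pytz-based minimum datetime as the sentinel).

    from datetime import datetime

    # A full backup whose data is from 8:00 and a later backup from 8:20.
    backups = [
        {'id': 'backup-0800', 'data_timestamp': datetime(2015, 10, 10, 8, 0)},
        {'id': 'backup-0820', 'data_timestamp': datetime(2015, 10, 10, 8, 20)},
    ]
    # The snapshot being backed up was taken at 8:10.
    snapshot = {'created_at': datetime(2015, 10, 10, 8, 10)}

    # Backups whose data is newer than the snapshot map to a sentinel
    # minimum datetime, so max() can never pick them as the parent.
    latest_backup = max(
        backups,
        key=lambda x: x['data_timestamp']
        if (not snapshot or x['data_timestamp'] < snapshot['created_at'])
        else datetime.min)

    print(latest_backup['id'])  # backup-0800: the 8:20 backup is excluded

The new incremental backup then records the snapshot's created_at (8:10) as its own data_timestamp, exactly as the NOTE's 8:00/8:10/8:20 example describes.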
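A design note on migration 061 above: it backfills data_timestamp with a Python loop issuing one UPDATE per row. Under the same SQLAlchemy expression API, the backfill could hypothetically be collapsed into a single server-side statement inside upgrade(); this one-liner is shown for comparison only and is not part of the patch:

    # Set every row's data_timestamp from its own created_at in one UPDATE.
    backups.update().values(data_timestamp=backups.c.created_at).execute()

The per-row loop trades extra round trips on large backups tables for explicitness and matches the style of neighboring migrations.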
diff --git a/cinder/tests/unit/utils.py b/cinder/tests/unit/utils.py index 69f85b530..bc307c232 100644 --- a/cinder/tests/unit/utils.py +++ b/cinder/tests/unit/utils.py @@ -171,7 +171,9 @@ def create_backup(ctxt, status='creating', parent_id=None, temp_volume_id=None, - temp_snapshot_id=None): + temp_snapshot_id=None, + snapshot_id=None, + data_timestamp=None): backup = {} backup['volume_id'] = volume_id backup['user_id'] = ctxt.user_id @@ -189,6 +191,8 @@ def create_backup(ctxt, backup['object_count'] = 22 backup['temp_volume_id'] = temp_volume_id backup['temp_snapshot_id'] = temp_snapshot_id + backup['snapshot_id'] = snapshot_id + backup['data_timestamp'] = data_timestamp return db.backup_create(ctxt, backup) diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index b5ecd979c..aa3d32222 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -1073,6 +1073,15 @@ class BaseVD(object): def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" + # NOTE(xyang): _backup_volume_temp_snapshot and + # _backup_volume_temp_volume are split into two + # functions because there were concerns during code + # reviews that it is confusing to put all the logic + # into one function. There's a trade-off between + # reducing code duplication and increasing code + # readability here. Added a note here to explain why + # we've decided to have two separate functions as + # there will always be arguments from both sides. if self.backup_use_temp_snapshot(): self._backup_volume_temp_snapshot(context, backup, backup_service) @@ -1081,28 +1090,47 @@ class BaseVD(object): backup_service) def _backup_volume_temp_volume(self, context, backup, backup_service): - """Create a new backup from an existing volume. + """Create a new backup from an existing volume or snapshot. - For in-use volume, create a temp volume and back it up. + To back up a snapshot, create a temp volume from the snapshot and + back it up. + + Otherwise, to back up an in-use volume, create a temp volume and + back it up. """ volume = self.db.volume_get(context, backup.volume_id) + snapshot = None + if backup.snapshot_id: + snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) - # NOTE(xyang): Check volume status; if 'in-use', create a temp - # volume from the source volume, backup the temp volume, and - # then clean up the temp volume; if 'available', just backup the - # volume. - previous_status = volume.get('previous_status', None) - device_to_backup = volume temp_vol_ref = None - if previous_status == "in-use": - temp_vol_ref = self._create_temp_cloned_volume( - context, volume) + device_to_backup = volume + + # NOTE(xyang): If backing up from a snapshot, create a temp + # volume from the source snapshot, back up the temp volume, and + # then clean up the temp volume. + if snapshot: + temp_vol_ref = self._create_temp_volume_from_snapshot( + context, volume, snapshot) backup.temp_volume_id = temp_vol_ref['id'] backup.save() device_to_backup = temp_vol_ref + else: + # NOTE(xyang): Check the volume status if not backing up from + # a snapshot; if 'in-use', create a temp volume from the source + # volume, back up the temp volume, and then clean up the temp + # volume; if 'available', just back up the volume.
+ previous_status = volume.get('previous_status') + if previous_status == "in-use": + temp_vol_ref = self._create_temp_cloned_volume( + context, volume) + backup.temp_volume_id = temp_vol_ref['id'] + backup.save() + device_to_backup = temp_vol_ref + self._backup_device(context, backup, backup_service, device_to_backup) if temp_vol_ref: @@ -1111,29 +1139,43 @@ class BaseVD(object): backup.save() def _backup_volume_temp_snapshot(self, context, backup, backup_service): - """Create a new backup from an existing volume. + """Create a new backup from an existing volume or snapshot. - For in-use volume, create a temp snapshot and back it up. + If backing up from a snapshot, back it up directly. + + Otherwise, for an in-use volume, create a temp snapshot and back it up. """ volume = self.db.volume_get(context, backup.volume_id) + snapshot = None + if backup.snapshot_id: + snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) - # NOTE(xyang): Check volume status; if 'in-use', create a temp - # snapshot from the source volume, backup the temp snapshot, and - # then clean up the temp snapshot; if 'available', just backup the - # volume. - previous_status = volume.get('previous_status', None) device_to_backup = volume is_snapshot = False temp_snapshot = None - if previous_status == "in-use": - temp_snapshot = self._create_temp_snapshot(context, volume) - backup.temp_snapshot_id = temp_snapshot.id - backup.save() - device_to_backup = temp_snapshot + + # NOTE(xyang): If backing up from a snapshot, back it up + # directly. No need to clean it up. + if snapshot: + device_to_backup = snapshot is_snapshot = True + else: + # NOTE(xyang): If not backing up from a snapshot, check the volume
 + # status. If the volume status is 'in-use', create a temp snapshot + # from the source volume, back up the temp snapshot, and then clean + # up the temp snapshot; if the volume status is 'available', just + # back up the volume.
+ previous_status = volume.get('previous_status') + if previous_status == "in-use": + temp_snapshot = self._create_temp_snapshot(context, volume) + backup.temp_snapshot_id = temp_snapshot.id + backup.save() + device_to_backup = temp_snapshot + is_snapshot = True + self._backup_device(context, backup, backup_service, device_to_backup, is_snapshot) @@ -1255,6 +1297,27 @@ class BaseVD(object): {'status': 'available'}) return temp_vol_ref + def _create_temp_volume_from_snapshot(self, context, volume, snapshot): + temp_volume = { + 'size': volume['size'], + 'display_name': 'backup-vol-%s' % volume['id'], + 'host': volume['host'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': 'creating', + } + temp_vol_ref = self.db.volume_create(context, temp_volume) + try: + self.create_volume_from_snapshot(temp_vol_ref, snapshot) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_destroy(context.elevated(), + temp_vol_ref['id']) + + self.db.volume_update(context, temp_vol_ref['id'], + {'status': 'available'}) + return temp_vol_ref + def _delete_temp_snapshot(self, context, snapshot): self.delete_snapshot(snapshot) with snapshot.obj_as_admin(): diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py index 868792362..0f7a147cf 100644 --- a/cinder/volume/drivers/lvm.py +++ b/cinder/volume/drivers/lvm.py @@ -31,6 +31,7 @@ from cinder.brick.local_dev import lvm as lvm from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils +from cinder import objects from cinder import utils from cinder.volume import driver from cinder.volume import utils as volutils @@ -505,15 +506,28 @@ class LVMVolumeDriver(driver.VolumeDriver): def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup.volume_id) + snapshot = None + if backup.snapshot_id: + snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) temp_snapshot = None - previous_status = volume['previous_status'] - if previous_status == 'in-use': - temp_snapshot = self._create_temp_snapshot(context, volume) - backup.temp_snapshot_id = temp_snapshot.id - backup.save() - volume_path = self.local_path(temp_snapshot) + # NOTE(xyang): If backing up from a snapshot, back it up + # directly. No need to clean it up. + if snapshot: + volume_path = self.local_path(snapshot) else: - volume_path = self.local_path(volume) + # NOTE(xyang): If not backing up from a snapshot, check the volume + # status. If the volume status is 'in-use', create a temp snapshot + # from the source volume, back up the temp snapshot, and then clean + # up the temp snapshot; if the volume status is 'available', just + # back up the volume.
+ previous_status = volume.get('previous_status', None) + if previous_status == "in-use": + temp_snapshot = self._create_temp_snapshot(context, volume) + backup.temp_snapshot_id = temp_snapshot.id + backup.save() + volume_path = self.local_path(temp_snapshot) + else: + volume_path = self.local_path(volume) try: with utils.temporary_chown(volume_path): diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py index e94ac20ec..e875f5eee 100644 --- a/cinder/volume/utils.py +++ b/cinder/volume/utils.py @@ -108,6 +108,7 @@ def _usage_from_backup(backup_ref, **kw): fail_reason=backup_ref['fail_reason'], parent_id=backup_ref['parent_id'], num_dependent_backups=num_dependent_backups, + snapshot_id=backup_ref['snapshot_id'], ) usage_info.update(kw) diff --git a/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml b/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml new file mode 100644 index 000000000..e72117eb5 --- /dev/null +++ b/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added support for backing up snapshots.
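The driver changes above all follow the same branch order: an explicit snapshot wins, then the in-use temp-resource path, then the plain volume. Here is a condensed, self-contained sketch of that ordering as added to BaseVD._backup_volume_temp_snapshot; the function name and return convention are illustrative, not the driver's actual interface:

    def choose_backup_device(volume, snapshot):
        """Return (device, is_snapshot, needs_temp_snapshot) for a backup."""
        if snapshot is not None:
            # New in this patch: a snapshot is backed up directly and
            # bypasses the in-use check below; nothing temporary to clean up.
            return snapshot, True, False
        if volume.get('previous_status') == 'in-use':
            # Existing behavior: an in-use volume is backed up via a temp
            # snapshot that the caller creates and later deletes.
            return None, True, True
        # Existing behavior: an available volume is backed up as-is.
        return volume, False, False

    # The snapshot path wins even when the volume is attached:
    print(choose_backup_device({'previous_status': 'in-use'}, {'id': 'snap-1'}))

This mirrors the API-side rule, where a request carrying snapshot_id skips the force-flag check for in-use volumes.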
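End to end, the feature is exercised through the existing create-backup request by adding snapshot_id to the body. A minimal sketch with the requests library; the endpoint, port, token, and UUIDs are placeholders for a typical deployment, the field names mirror the controller's backup.get() calls shown above, and 'name' is assumed per the v2 backups API:

    import json
    import requests

    CINDER = 'http://cinder-api:8776/v2/<project_id>'  # illustrative endpoint
    HEADERS = {'X-Auth-Token': '<keystone-token>',
               'Content-Type': 'application/json'}
    volume_id = '11111111-1111-1111-1111-111111111111'    # source volume
    snapshot_id = '22222222-2222-2222-2222-222222222222'  # its snapshot

    body = {'backup': {'name': 'nightly001',
                       'description': 'backup of a snapshot',
                       'volume_id': volume_id,
                       'container': 'nightlybackups',
                       'incremental': True,
                       'snapshot_id': snapshot_id}}  # new in this patch

    resp = requests.post(CINDER + '/backups', headers=HEADERS,
                         data=json.dumps(body))
    backup = resp.json()['backup']
    # The backup detail view now also exposes snapshot_id and data_timestamp.

Because a snapshot is already a point-in-time copy, the snapshot path does not require force=True even when the source volume is in use.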