Backup snapshots
author Xing Yang <xing.yang@emc.com>
Sat, 10 Oct 2015 01:57:18 +0000 (21:57 -0400)
committer Xing Yang <xing.yang@emc.com>
Sat, 21 Nov 2015 15:15:19 +0000 (10:15 -0500)
Today we can back up a volume, but not a snapshot.
This patch adds support for backing up snapshots,
providing another layer of data protection for
users.
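
For example, the v2 backups API create request now accepts an
optional snapshot_id (an illustrative request body; the identifiers
are placeholders):

    POST /v2/{project_id}/backups
    {"backup": {"display_name": "nightly001",
                "volume_id": "<volume-uuid>",
                "container": "nightlybackups",
                "incremental": true,
                "snapshot_id": "<snapshot-uuid>"}}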

DocImpact
implements blueprint backup-snapshots

Change-Id: Ib4ab9ca9dc72b30151154f3f96037f9ce3c9c540

17 files changed:
cinder/api/contrib/backups.py
cinder/api/views/backups.py
cinder/backup/api.py
cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/objects/backup.py
cinder/tests/unit/api/contrib/test_backups.py
cinder/tests/unit/objects/test_backup.py
cinder/tests/unit/objects/test_objects.py
cinder/tests/unit/test_db_api.py
cinder/tests/unit/test_migrations.py
cinder/tests/unit/test_volume_utils.py
cinder/tests/unit/utils.py
cinder/volume/driver.py
cinder/volume/drivers/lvm.py
cinder/volume/utils.py
releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml [new file with mode: 0644]

index 4cd9e2fa2cc48fbbade30e0e28926de0b566e0de..dcbf80778d6d5c647ebbe65749fe23f744995409 100644 (file)
@@ -263,6 +263,7 @@ class BackupsController(wsgi.Controller):
         description = backup.get('description', None)
         incremental = backup.get('incremental', False)
         force = backup.get('force', False)
+        snapshot_id = backup.get('snapshot_id', None)
         LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
                      " %(container)s"),
                  {'volume_id': volume_id, 'container': container},
@@ -271,7 +272,8 @@ class BackupsController(wsgi.Controller):
         try:
             new_backup = self.backup_api.create(context, name, description,
                                                 volume_id, container,
-                                                incremental, None, force)
+                                                incremental, None, force,
+                                                snapshot_id)
         except exception.InvalidVolume as error:
             raise exc.HTTPBadRequest(explanation=error.msg)
         except exception.VolumeNotFound as error:
index 3917e2c67c570eaa4cd078c45b0f3808000b4207..7c84cc34a8bbdd9a06dd8af4fde903a154bcf06e 100644 (file)
@@ -78,6 +78,8 @@ class ViewBuilder(common.ViewBuilder):
                 'links': self._get_links(request, backup['id']),
                 'is_incremental': backup.is_incremental,
                 'has_dependent_backups': backup.has_dependent_backups,
+                'snapshot_id': backup.snapshot_id,
+                'data_timestamp': backup.data_timestamp,
             }
         }
 
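With these two keys in the view, a backup detail response now carries
the snapshot linkage and the data timestamp (an illustrative fragment;
values are placeholders):

    {"backup": {"id": "<backup-uuid>",
                "is_incremental": false,
                "has_dependent_backups": false,
                "snapshot_id": "<snapshot-uuid or null>",
                "data_timestamp": "2015-10-10T01:57:18.000000"}}
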
index 56d7e0b7c9b9a9a78e9a6869d030b25a3c64f8aa..a5ef98a288ab8043f89d88bfcd544620abf2f339 100644 (file)
 Handles all requests relating to the volume backups service.
 """
 
+from datetime import datetime
+
 from eventlet import greenthread
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_utils import strutils
+from pytz import timezone
 
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
@@ -150,20 +153,28 @@ class API(base.Base):
 
     def create(self, context, name, description, volume_id,
                container, incremental=False, availability_zone=None,
-               force=False):
+               force=False, snapshot_id=None):
         """Make the RPC call to create a volume backup."""
         check_policy(context, 'create')
         volume = self.volume_api.get(context, volume_id)
+        snapshot = None
+        if snapshot_id:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
 
         if volume['status'] not in ["available", "in-use"]:
             msg = (_('Volume to be backed up must be available '
                      'or in-use, but the current status is "%s".')
                    % volume['status'])
             raise exception.InvalidVolume(reason=msg)
-        elif volume['status'] in ["in-use"] and not force:
+        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
             msg = _('Backing up an in-use volume must use '
                     'the force flag.')
             raise exception.InvalidVolume(reason=msg)
+        elif snapshot_id and snapshot['status'] not in ["available"]:
+            msg = (_('Snapshot to be backed up must be available, '
+                     'but the current status is "%s".')
+                   % snapshot['status'])
+            raise exception.InvalidSnapshot(reason=msg)
 
         previous_status = volume['status']
         volume_host = volume_utils.extract_host(volume['host'], 'host')
@@ -208,15 +219,36 @@ class API(base.Base):
                     raise exception.BackupLimitExceeded(
                         allowed=quotas[over])
 
-        # Find the latest backup of the volume and use it as the parent
-        # backup to do an incremental backup.
+        # Find the latest backup and use it as the parent backup to do an
+        # incremental backup.
         latest_backup = None
         if incremental:
             backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                            volume_id)
             if backups.objects:
-                latest_backup = max(backups.objects,
-                                    key=lambda x: x['created_at'])
+                # NOTE(xyang): The 'data_timestamp' field records the time
+                # when the data on the volume was first saved. For a
+                # backup taken from a volume, 'data_timestamp' is the
+                # same as the backup's 'created_at'; for a backup taken
+                # from a snapshot, it is the same as the snapshot's
+                # 'created_at'.
+                # If not backing up from a snapshot, the backup with the
+                # latest 'data_timestamp' becomes the parent. If backing
+                # up from a snapshot, the backup with the latest
+                # 'data_timestamp' is chosen only if that timestamp is
+                # earlier than the snapshot's 'created_at'; otherwise
+                # that backup is not eligible to be the parent.
+                # For example, a volume has a backup taken at 8:00, then
+                # a snapshot taken at 8:10, and then a backup at 8:20.
+                # When taking an incremental backup of the snapshot, the
+                # parent should be the backup at 8:00, not 8:20, and the
+                # 'data_timestamp' of this new backup will be 8:10.
+                latest_backup = max(
+                    backups.objects,
+                    key=lambda x: x['data_timestamp']
+                    if (not snapshot or (snapshot and x['data_timestamp']
+                                         < snapshot['created_at']))
+                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
             else:
                 msg = _('No backups available to do an incremental backup.')
                 raise exception.InvalidBackup(reason=msg)
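
The sentinel datetime in the max() key above makes any backup taken
after the snapshot compare lowest, so it can never win as the parent.
A roughly equivalent filter-then-max sketch (illustration only, not
part of the patch; unlike the sentinel form it yields None when no
backup predates the snapshot):

    candidates = backups.objects
    if snapshot:
        # Only backups whose data predates the snapshot may serve as
        # the parent of a backup taken from that snapshot.
        candidates = [b for b in candidates
                      if b['data_timestamp'] < snapshot['created_at']]
    latest_backup = (max(candidates, key=lambda b: b['data_timestamp'])
                     if candidates else None)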
@@ -229,6 +261,11 @@ class API(base.Base):
                         'incremental backup.')
                 raise exception.InvalidBackup(reason=msg)
 
+        data_timestamp = None
+        if snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
+            data_timestamp = snapshot.created_at
+
         self.db.volume_update(context, volume_id,
                               {'status': 'backing-up',
                                'previous_status': previous_status})
@@ -244,9 +281,14 @@ class API(base.Base):
                 'parent_id': parent_id,
                 'size': volume['size'],
                 'host': volume_host,
+                'snapshot_id': snapshot_id,
+                'data_timestamp': data_timestamp,
             }
             backup = objects.Backup(context=context, **kwargs)
             backup.create()
+            if not snapshot_id:
+                backup.data_timestamp = backup.created_at
+                backup.save()
             QUOTAS.commit(context, reservations)
         except Exception:
             with excutils.save_and_reraise_exception():
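
End to end, the extended create() signature can be exercised as below
(a sketch only, assuming a valid request context, an 'available'
snapshot, and an existing base backup for the incremental case):

    from cinder.backup import api as backup_api

    # ctxt, volume_id and snapshot_id come from the caller.
    api = backup_api.API()
    backup = api.create(ctxt, name='nightly001', description=None,
                        volume_id=volume_id, container='nightlybackups',
                        incremental=True, availability_zone=None,
                        force=False, snapshot_id=snapshot_id)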
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py
new file mode 100644 (file)
index 0000000..5b242d7
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (c) 2015 EMC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Column, DateTime, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    snapshot_id = Column('snapshot_id', String(length=36))
+    data_timestamp = Column('data_timestamp', DateTime)
+
+    backups.create_column(snapshot_id)
+    backups.update().values(snapshot_id=None).execute()
+
+    backups.create_column(data_timestamp)
+    backups.update().values(data_timestamp=None).execute()
+
+    # Copy existing created_at timestamp to data_timestamp
+    # in the backups table.
+    backups_list = list(backups.select().execute())
+    for backup in backups_list:
+        backup_id = backup.id
+        backups.update().\
+            where(backups.c.id == backup_id).\
+            values(data_timestamp=backup.created_at).execute()
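
The per-row loop above can also be written as a single set-based
UPDATE (an equivalent sketch, not what the patch ships):

    # Backfill data_timestamp from created_at in one statement.
    backups.update().values(
        data_timestamp=backups.c.created_at).execute()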
index ee9b63872184979a6977cf6d2862ff784ac353d9..282b52e7e5b89fd5839a89d2fa6c3e6eaeb1c9de 100644 (file)
@@ -541,6 +541,8 @@ class Backup(BASE, CinderBase):
     temp_volume_id = Column(String(36))
     temp_snapshot_id = Column(String(36))
     num_dependent_backups = Column(Integer)
+    snapshot_id = Column(String(36))
+    data_timestamp = Column(DateTime)
 
     @validates('fail_reason')
     def validate_fail_reason(self, key, fail_reason):
index 00ebb1db7e4ee7d706c916cf0e6e845cd3016659..33e074e0c103438238ea37b4180a2e62e3326271 100644 (file)
@@ -38,7 +38,8 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
     # Version 1.0: Initial version
     # Version 1.1: Add new field num_dependent_backups and extra fields
     #              is_incremental and has_dependent_backups.
-    VERSION = '1.1'
+    # Version 1.2: Add new field snapshot_id and data_timestamp.
+    VERSION = '1.2'
 
     fields = {
         'id': fields.UUIDField(),
@@ -68,6 +69,8 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
         'temp_volume_id': fields.StringField(nullable=True),
         'temp_snapshot_id': fields.StringField(nullable=True),
         'num_dependent_backups': fields.IntegerField(),
+        'snapshot_id': fields.StringField(nullable=True),
+        'data_timestamp': fields.DateTimeField(nullable=True),
     }
 
     obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']
index 7832036dad042570d7b4b8321af15b30e45a08c8..65aa45a07b3c60ad66f6043309f4260d9acc8e40 100644 (file)
@@ -20,6 +20,7 @@ Tests for Backup code.
 import json
 from xml.dom import minidom
 
+import ddt
 import mock
 from oslo_utils import timeutils
 import webob
@@ -37,7 +38,10 @@ from cinder.tests.unit import utils
 # needed for stubs to work
 import cinder.volume
 
+NUM_ELEMENTS_IN_BACKUP = 17
 
+
+@ddt.ddt
 class BackupsAPITestCase(test.TestCase):
     """Test Case for backups API."""
 
@@ -55,11 +59,12 @@ class BackupsAPITestCase(test.TestCase):
                        display_description='this is a test backup',
                        container='volumebackups',
                        status='creating',
-                       snapshot=False,
                        incremental=False,
                        parent_id=None,
                        size=0, object_count=0, host='testhost',
-                       num_dependent_backups=0):
+                       num_dependent_backups=0,
+                       snapshot_id=None,
+                       data_timestamp=None):
         """Create a backup object."""
         backup = {}
         backup['volume_id'] = volume_id
@@ -74,21 +79,35 @@ class BackupsAPITestCase(test.TestCase):
         backup['fail_reason'] = ''
         backup['size'] = size
         backup['object_count'] = object_count
-        backup['snapshot'] = snapshot
         backup['incremental'] = incremental
         backup['parent_id'] = parent_id
         backup['num_dependent_backups'] = num_dependent_backups
-        return db.backup_create(context.get_admin_context(), backup)['id']
+        backup['snapshot_id'] = snapshot_id
+        backup['data_timestamp'] = data_timestamp
+        backup = db.backup_create(context.get_admin_context(), backup)
+        if not snapshot_id:
+            db.backup_update(context.get_admin_context(),
+                             backup['id'],
+                             {'data_timestamp': backup['created_at']})
+        return backup['id']
 
     @staticmethod
     def _get_backup_attrib(backup_id, attrib_name):
         return db.backup_get(context.get_admin_context(),
                              backup_id)[attrib_name]
 
-    def test_show_backup(self):
+    @ddt.data(False, True)
+    def test_show_backup(self, backup_from_snapshot):
         volume_id = utils.create_volume(self.context, size=5,
                                         status='creating')['id']
-        backup_id = self._create_backup(volume_id)
+        snapshot = None
+        snapshot_id = None
+        if backup_from_snapshot:
+            snapshot = utils.create_snapshot(self.context,
+                                             volume_id)
+            snapshot_id = snapshot.id
+        backup_id = self._create_backup(volume_id,
+                                        snapshot_id=snapshot_id)
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   backup_id)
         req.method = 'GET'
@@ -109,8 +128,11 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(volume_id, res_dict['backup']['volume_id'])
         self.assertFalse(res_dict['backup']['is_incremental'])
         self.assertFalse(res_dict['backup']['has_dependent_backups'])
+        self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id'])
         self.assertIn('updated_at', res_dict['backup'])
 
+        if snapshot:
+            snapshot.destroy()
         db.backup_destroy(context.get_admin_context(), backup_id)
         db.volume_destroy(context.get_admin_context(), volume_id)
 
@@ -283,7 +305,7 @@ class BackupsAPITestCase(test.TestCase):
         res_dict = json.loads(res.body)
 
         self.assertEqual(200, res.status_int)
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual('az1', res_dict['backups'][0]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][0]['container'])
@@ -298,7 +320,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('1', res_dict['backups'][0]['volume_id'])
         self.assertIn('updated_at', res_dict['backups'][0])
 
-        self.assertEqual(15, len(res_dict['backups'][1]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1]))
         self.assertEqual('az1', res_dict['backups'][1]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][1]['container'])
@@ -313,7 +335,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('1', res_dict['backups'][1]['volume_id'])
         self.assertIn('updated_at', res_dict['backups'][1])
 
-        self.assertEqual(15, len(res_dict['backups'][2]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][2]))
         self.assertEqual('az1', res_dict['backups'][2]['availability_zone'])
         self.assertEqual('volumebackups', res_dict['backups'][2]['container'])
         self.assertEqual('this is a test backup',
@@ -469,9 +491,9 @@ class BackupsAPITestCase(test.TestCase):
 
         self.assertEqual(200, res.status_int)
         self.assertEqual(2, len(res_dict['backups']))
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual(backup_id3, res_dict['backups'][0]['id'])
-        self.assertEqual(15, len(res_dict['backups'][1]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1]))
         self.assertEqual(backup_id2, res_dict['backups'][1]['id'])
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
@@ -492,9 +514,9 @@ class BackupsAPITestCase(test.TestCase):
 
         self.assertEqual(200, res.status_int)
         self.assertEqual(2, len(res_dict['backups']))
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual(backup_id2, res_dict['backups'][0]['id'])
-        self.assertEqual(15, len(res_dict['backups'][1]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1]))
         self.assertEqual(backup_id1, res_dict['backups'][1]['id'])
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
@@ -515,7 +537,7 @@ class BackupsAPITestCase(test.TestCase):
 
         self.assertEqual(200, res.status_int)
         self.assertEqual(1, len(res_dict['backups']))
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual(backup_id2, res_dict['backups'][0]['id'])
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
@@ -683,14 +705,22 @@ class BackupsAPITestCase(test.TestCase):
     @mock.patch('cinder.db.service_get_all_by_topic')
     @mock.patch(
         'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
-    def test_create_backup_delta(self, mock_validate,
+    @ddt.data(False, True)
+    def test_create_backup_delta(self, backup_from_snapshot,
+                                 mock_validate,
                                  _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
             {'availability_zone': "fake_az", 'host': 'test_host',
              'disabled': 0, 'updated_at': timeutils.utcnow()}]
 
         volume_id = utils.create_volume(self.context, size=5)['id']
-
+        snapshot = None
+        snapshot_id = None
+        if backup_from_snapshot:
+            snapshot = utils.create_snapshot(self.context,
+                                             volume_id,
+                                             status='available')
+            snapshot_id = snapshot.id
         backup_id = self._create_backup(volume_id, status="available")
         body = {"backup": {"display_name": "nightly001",
                            "display_description":
@@ -698,6 +728,7 @@ class BackupsAPITestCase(test.TestCase):
                            "volume_id": volume_id,
                            "container": "nightlybackups",
                            "incremental": True,
+                           "snapshot_id": snapshot_id,
                            }
                 }
         req = webob.Request.blank('/v2/fake/backups')
@@ -713,6 +744,8 @@ class BackupsAPITestCase(test.TestCase):
         self.assertTrue(mock_validate.called)
 
         db.backup_destroy(context.get_admin_context(), backup_id)
+        if snapshot:
+            snapshot.destroy()
         db.volume_destroy(context.get_admin_context(), volume_id)
 
     @mock.patch('cinder.db.service_get_all_by_topic')
@@ -1932,7 +1965,8 @@ class BackupsAPITestCase(test.TestCase):
         self.assertRaises(exception.NotSupportedOperation,
                           self.backup_api.delete, self.context, backup, True)
 
-    def test_show_incremental_backup(self):
+    @ddt.data(False, True)
+    def test_show_incremental_backup(self, backup_from_snapshot):
         volume_id = utils.create_volume(self.context, size=5)['id']
         parent_backup_id = self._create_backup(volume_id, status="available",
                                                num_dependent_backups=1)
@@ -1940,9 +1974,16 @@ class BackupsAPITestCase(test.TestCase):
                                         incremental=True,
                                         parent_id=parent_backup_id,
                                         num_dependent_backups=1)
+        snapshot = None
+        snapshot_id = None
+        if backup_from_snapshot:
+            snapshot = utils.create_snapshot(self.context,
+                                             volume_id)
+            snapshot_id = snapshot.id
         child_backup_id = self._create_backup(volume_id, status="available",
                                               incremental=True,
-                                              parent_id=backup_id)
+                                              parent_id=backup_id,
+                                              snapshot_id=snapshot_id)
 
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   backup_id)
@@ -1954,6 +1995,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertTrue(res_dict['backup']['is_incremental'])
         self.assertTrue(res_dict['backup']['has_dependent_backups'])
+        self.assertIsNone(res_dict['backup']['snapshot_id'])
 
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   parent_backup_id)
@@ -1965,6 +2007,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertFalse(res_dict['backup']['is_incremental'])
         self.assertTrue(res_dict['backup']['has_dependent_backups'])
+        self.assertIsNone(res_dict['backup']['snapshot_id'])
 
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   child_backup_id)
@@ -1976,7 +2019,11 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertTrue(res_dict['backup']['is_incremental'])
         self.assertFalse(res_dict['backup']['has_dependent_backups'])
+        self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id'])
 
         db.backup_destroy(context.get_admin_context(), child_backup_id)
         db.backup_destroy(context.get_admin_context(), backup_id)
+        db.backup_destroy(context.get_admin_context(), parent_backup_id)
+        if snapshot:
+            snapshot.destroy()
         db.volume_destroy(context.get_admin_context(), volume_id)
index b1e13720ef49b6b245714099a65556f2e0a51b60..5c2828c01d303462bbc1955b31fda8d420901f88 100644 (file)
@@ -33,6 +33,8 @@ fake_backup = {
     'project_id': 'fake_project',
     'temp_volume_id': None,
     'temp_snapshot_id': None,
+    'snapshot_id': None,
+    'data_timestamp': None,
 }
 
 
@@ -85,6 +87,11 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertEqual('2', backup.temp_volume_id)
         self.assertEqual('3', backup.temp_snapshot_id)
 
+    def test_obj_field_snapshot_id(self):
+        backup = objects.Backup(context=self.context,
+                                snapshot_id='2')
+        self.assertEqual('2', backup.snapshot_id)
+
     def test_import_record(self):
         utils.replace_obj_loader(self, objects.Backup)
         backup = objects.Backup(context=self.context, id=1, parent_id=None,
index b70783479a98066cd67d318b861c9396419e9d10..1348cb633d62930d5d273dfa98a21b503f5158d2 100644 (file)
@@ -21,8 +21,8 @@ from cinder import test
 # NOTE: The hashes in this list should only be changed if they come with a
 # corresponding version bump in the affected objects.
 object_data = {
-    'Backup': '1.1-cd077ec037f5ad1f5409fd660bd59f53',
-    'BackupImport': '1.1-cd077ec037f5ad1f5409fd660bd59f53',
+    'Backup': '1.2-62c3da6df3dccb76796e4da65a45a44f',
+    'BackupImport': '1.2-62c3da6df3dccb76796e4da65a45a44f',
     'BackupList': '1.0-24591dabe26d920ce0756fe64cd5f3aa',
     'CGSnapshot': '1.0-190da2a2aa9457edc771d888f7d225c4',
     'CGSnapshotList': '1.0-e8c3f4078cd0ee23487b34d173eec776',
index c826e286038dd54cdf9b37093b3ab95f32798d6d..c9eb9a24c96b8af28343e482fff06eaccfd2dda1 100644 (file)
@@ -1901,7 +1901,8 @@ class DBAPIBackupTestCase(BaseTest):
 
     """Tests for db.api.backup_* methods."""
 
-    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
+                     'updated_at', 'data_timestamp']
 
     def setUp(self):
         super(DBAPIBackupTestCase, self).setUp()
@@ -1927,7 +1928,8 @@ class DBAPIBackupTestCase(BaseTest):
             'object_count': 100,
             'temp_volume_id': 'temp_volume_id',
             'temp_snapshot_id': 'temp_snapshot_id',
-            'num_dependent_backups': 0, }
+            'num_dependent_backups': 0,
+            'snapshot_id': 'snapshot_id', }
         if one:
             return base_values
 
index 75df2424fc493f91d1a6de05df2da54450f87717..c964d1629274f357cfa2ddf17bad854d75705256 100644 (file)
@@ -710,6 +710,13 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(private_data.c.last_used.type,
                               self.TIME_TYPE)
 
+    def _check_061(self, engine, data):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertIsInstance(backups.c.snapshot_id.type,
+                              sqlalchemy.types.VARCHAR)
+        self.assertIsInstance(backups.c.data_timestamp.type,
+                              self.TIME_TYPE)
+
     def test_walk_versions(self):
         self.walk_versions(False, False)
 
index 1329ee662404a9f4766ed0c17ea48ef00debc5cb..d5158dbc2f9b183adba8f4ea203438350bbd4c62 100644 (file)
@@ -385,6 +385,7 @@ class NotifyUsageTestCase(test.TestCase):
             'fail_reason': None,
             'parent_id': 'fake_parent_id',
             'num_dependent_backups': 0,
+            'snapshot_id': None,
         }
 
         # Make it easier to find out differences between raw and expected.
index 69f85b5303d9857d3ea7de23f2147d3cdbd557c0..bc307c2321a1937b4a95b0525240ed60317e3885 100644 (file)
@@ -171,7 +171,9 @@ def create_backup(ctxt,
                   status='creating',
                   parent_id=None,
                   temp_volume_id=None,
-                  temp_snapshot_id=None):
+                  temp_snapshot_id=None,
+                  snapshot_id=None,
+                  data_timestamp=None):
     backup = {}
     backup['volume_id'] = volume_id
     backup['user_id'] = ctxt.user_id
@@ -189,6 +191,8 @@ def create_backup(ctxt,
     backup['object_count'] = 22
     backup['temp_volume_id'] = temp_volume_id
     backup['temp_snapshot_id'] = temp_snapshot_id
+    backup['snapshot_id'] = snapshot_id
+    backup['data_timestamp'] = data_timestamp
     return db.backup_create(ctxt, backup)
 
 
index b5ecd979cac2ad5b4b44461e781a8c9324dabad8..aa3d322227111c27c8c39244ef0aabe6f5c3d259 100644 (file)
@@ -1073,6 +1073,15 @@ class BaseVD(object):
 
     def backup_volume(self, context, backup, backup_service):
         """Create a new backup from an existing volume."""
+        # NOTE(xyang): _backup_volume_temp_snapshot and
+        # _backup_volume_temp_volume are split into two
+        # functions because there were concerns during code
+        # reviews that it is confusing to put all the logic
+        # into one function. There's a trade-off between
+        # reducing code duplication and increasing code
+        # readability here. Added a note here to explain why
+        # we've decided to have two separate functions as
+        # there will always be arguments from both sides.
         if self.backup_use_temp_snapshot():
             self._backup_volume_temp_snapshot(context, backup,
                                               backup_service)
@@ -1081,28 +1090,47 @@ class BaseVD(object):
                                             backup_service)
 
     def _backup_volume_temp_volume(self, context, backup, backup_service):
-        """Create a new backup from an existing volume.
+        """Create a new backup from an existing volume or snapshot.
 
-        For in-use volume, create a temp volume and back it up.
+        To back up a snapshot, create a temp volume from the snapshot and
+        back it up.
+
+        Otherwise, to back up an in-use volume, create a temp volume and
+        back it up.
         """
         volume = self.db.volume_get(context, backup.volume_id)
+        snapshot = None
+        if backup.snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
 
         LOG.debug('Creating a new backup for volume %s.', volume['name'])
 
-        # NOTE(xyang): Check volume status; if 'in-use', create a temp
-        # volume from the source volume, backup the temp volume, and
-        # then clean up the temp volume; if 'available', just backup the
-        # volume.
-        previous_status = volume.get('previous_status', None)
-        device_to_backup = volume
         temp_vol_ref = None
-        if previous_status == "in-use":
-            temp_vol_ref = self._create_temp_cloned_volume(
-                context, volume)
+        device_to_backup = volume
+
+        # NOTE(xyang): If backing up from a snapshot, create a temp
+        # volume from the source snapshot, back up the temp volume, and
+        # then clean up the temp volume.
+        if snapshot:
+            temp_vol_ref = self._create_temp_volume_from_snapshot(
+                context, volume, snapshot)
             backup.temp_volume_id = temp_vol_ref['id']
             backup.save()
             device_to_backup = temp_vol_ref
 
+        else:
+            # NOTE(xyang): If not backing up from a snapshot, check the
+            # volume status: if 'in-use', create a temp volume from the
+            # source volume, back up the temp volume, and then clean up
+            # the temp volume; if 'available', just back up the volume.
+            previous_status = volume.get('previous_status')
+            if previous_status == "in-use":
+                temp_vol_ref = self._create_temp_cloned_volume(
+                    context, volume)
+                backup.temp_volume_id = temp_vol_ref['id']
+                backup.save()
+                device_to_backup = temp_vol_ref
+
         self._backup_device(context, backup, backup_service, device_to_backup)
 
         if temp_vol_ref:
@@ -1111,29 +1139,43 @@ class BaseVD(object):
             backup.save()
 
     def _backup_volume_temp_snapshot(self, context, backup, backup_service):
-        """Create a new backup from an existing volume.
+        """Create a new backup from an existing volume or snapshot.
 
-        For in-use volume, create a temp snapshot and back it up.
+        If backing up from a snapshot, back it up directly.
+
+        Otherwise, for an in-use volume, create a temp snapshot and back it up.
         """
         volume = self.db.volume_get(context, backup.volume_id)
+        snapshot = None
+        if backup.snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
 
         LOG.debug('Creating a new backup for volume %s.', volume['name'])
 
-        # NOTE(xyang): Check volume status; if 'in-use', create a temp
-        # snapshot from the source volume, backup the temp snapshot, and
-        # then clean up the temp snapshot; if 'available', just backup the
-        # volume.
-        previous_status = volume.get('previous_status', None)
         device_to_backup = volume
         is_snapshot = False
         temp_snapshot = None
-        if previous_status == "in-use":
-            temp_snapshot = self._create_temp_snapshot(context, volume)
-            backup.temp_snapshot_id = temp_snapshot.id
-            backup.save()
-            device_to_backup = temp_snapshot
+
+        # NOTE(xyang): If backing up from a snapshot, back it up
+        # directly; there is nothing to clean up afterwards.
+        if snapshot:
+            device_to_backup = snapshot
             is_snapshot = True
 
+        else:
+            # NOTE(xyang): If not backing up from a snapshot, check the
+            # volume status. If it is 'in-use', create a temp snapshot
+            # from the source volume, back up the temp snapshot, and
+            # then clean up the temp snapshot; if it is 'available',
+            # just back up the volume.
+            previous_status = volume.get('previous_status')
+            if previous_status == "in-use":
+                temp_snapshot = self._create_temp_snapshot(context, volume)
+                backup.temp_snapshot_id = temp_snapshot.id
+                backup.save()
+                device_to_backup = temp_snapshot
+                is_snapshot = True
+
         self._backup_device(context, backup, backup_service, device_to_backup,
                             is_snapshot)
 
@@ -1255,6 +1297,27 @@ class BaseVD(object):
                               {'status': 'available'})
         return temp_vol_ref
 
+    def _create_temp_volume_from_snapshot(self, context, volume, snapshot):
+        temp_volume = {
+            'size': volume['size'],
+            'display_name': 'backup-vol-%s' % volume['id'],
+            'host': volume['host'],
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'status': 'creating',
+        }
+        temp_vol_ref = self.db.volume_create(context, temp_volume)
+        try:
+            self.create_volume_from_snapshot(temp_vol_ref, snapshot)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_destroy(context.elevated(),
+                                       temp_vol_ref['id'])
+
+        self.db.volume_update(context, temp_vol_ref['id'],
+                              {'status': 'available'})
+        return temp_vol_ref
+
     def _delete_temp_snapshot(self, context, snapshot):
         self.delete_snapshot(snapshot)
         with snapshot.obj_as_admin():
index 86879236238dc97666973ca9c230d0dabaaaa8c4..0f7a147cf12a1310799b5d779599f6e2c897f091 100644 (file)
@@ -31,6 +31,7 @@ from cinder.brick.local_dev import lvm as lvm
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
+from cinder import objects
 from cinder import utils
 from cinder.volume import driver
 from cinder.volume import utils as volutils
@@ -505,15 +506,28 @@ class LVMVolumeDriver(driver.VolumeDriver):
     def backup_volume(self, context, backup, backup_service):
         """Create a new backup from an existing volume."""
         volume = self.db.volume_get(context, backup.volume_id)
+        snapshot = None
+        if backup.snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
         temp_snapshot = None
-        previous_status = volume['previous_status']
-        if previous_status == 'in-use':
-            temp_snapshot = self._create_temp_snapshot(context, volume)
-            backup.temp_snapshot_id = temp_snapshot.id
-            backup.save()
-            volume_path = self.local_path(temp_snapshot)
+        # NOTE(xyang): If backing up from a snapshot, back it up
+        # directly; there is nothing to clean up afterwards.
+        if snapshot:
+            volume_path = self.local_path(snapshot)
         else:
-            volume_path = self.local_path(volume)
+            # NOTE(xyang): If not backing up from a snapshot, check the
+            # volume status. If it is 'in-use', create a temp snapshot
+            # from the source volume, back up the temp snapshot, and
+            # then clean up the temp snapshot; if it is 'available',
+            # just back up the volume.
+            previous_status = volume.get('previous_status', None)
+            if previous_status == "in-use":
+                temp_snapshot = self._create_temp_snapshot(context, volume)
+                backup.temp_snapshot_id = temp_snapshot.id
+                backup.save()
+                volume_path = self.local_path(temp_snapshot)
+            else:
+                volume_path = self.local_path(volume)
 
         try:
             with utils.temporary_chown(volume_path):
index e94ac20ecdcc8aaad87c078e3e246e63825bbfca..e875f5eee388e9ae0ad1ac48a025edd4c7483af3 100644 (file)
@@ -108,6 +108,7 @@ def _usage_from_backup(backup_ref, **kw):
                       fail_reason=backup_ref['fail_reason'],
                       parent_id=backup_ref['parent_id'],
                       num_dependent_backups=num_dependent_backups,
+                      snapshot_id=backup_ref['snapshot_id'],
                       )
 
     usage_info.update(kw)
diff --git a/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml b/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml
new file mode 100644 (file)
index 0000000..e72117e
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Added support for backing up snapshots.