review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Incremental backup improvements for L
author: wanghao <wanghao749@huawei.com>
Thu, 4 Jun 2015 09:50:12 +0000 (17:50 +0800)
committer: wanghao <wanghao749@huawei.com>
Wed, 26 Aug 2015 06:33:14 +0000 (14:33 +0800)
1. Add 'is_incremental=True' and 'has_dependent_backups=True/False' to
the response body of backup queries.
2. Add parent_id to notification system.

Since we need to get the has_dependent_backups value of each backup when
querying the volume detail list, add an index to the parent_id column in
the backups table to reduce the performance impact.

APIImpact

When showing backup details, the response will include the additional fields
"is_incremental": True/False and "has_dependent_backups": True/False.

DocImpact
Change-Id: Id2fbf5616ba7bea847cf0443006800db89dd7c35
Implements: blueprint cinder-incremental-backup-improvements-for-l

13 files changed:
cinder/api/views/backups.py
cinder/backup/manager.py
cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_backups.py [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/objects/backup.py
cinder/tests/unit/api/contrib/test_backups.py
cinder/tests/unit/fake_backup.py
cinder/tests/unit/objects/test_backup.py
cinder/tests/unit/test_backup.py
cinder/tests/unit/test_db_api.py
cinder/tests/unit/test_migrations.py
cinder/tests/unit/test_volume_utils.py
cinder/volume/utils.py

index 21019e52f9498b461497c9efd90316cda1843635..24375a7051f8e62fb7b67485d256f1d6d335cc64 100644 (file)
@@ -76,7 +76,9 @@ class ViewBuilder(common.ViewBuilder):
                 'description': backup.get('display_description'),
                 'fail_reason': backup.get('fail_reason'),
                 'volume_id': backup.get('volume_id'),
-                'links': self._get_links(request, backup['id'])
+                'links': self._get_links(request, backup['id']),
+                'is_incremental': backup.is_incremental,
+                'has_dependent_backups': backup.has_dependent_backups,
             }
         }
 
index b77b24a3117744a067efca8ec809b854c95570fe..223d169fa903c03d1562be437cc12a5904174635 100644 (file)
@@ -361,6 +361,13 @@ class BackupManager(manager.SchedulerDependentManager):
         backup.size = volume['size']
         backup.availability_zone = self.az
         backup.save()
+        # Handle the num_dependent_backups of parent backup when child backup
+        # has created successfully.
+        if backup.parent_id:
+            parent_backup = objects.Backup.get_by_id(context,
+                                                     backup.parent_id)
+            parent_backup.num_dependent_backups += 1
+            parent_backup.save()
         LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
         self._notify_about_backup_usage(context, backup, "create.end")
 
@@ -513,7 +520,14 @@ class BackupManager(manager.SchedulerDependentManager):
             LOG.exception(_LE("Failed to update usages deleting backup"))
 
         backup.destroy()
-
+        # If this backup is incremental backup, handle the
+        # num_dependent_backups of parent backup
+        if backup.parent_id:
+            parent_backup = objects.Backup.get_by_id(context,
+                                                     backup.parent_id)
+            if parent_backup.has_dependent_backups:
+                parent_backup.num_dependent_backups -= 1
+                parent_backup.save()
         # Commit the reservations
         if reservations:
             QUOTAS.commit(context, reservations,
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_backups.py b/cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_backups.py
new file mode 100644 (file)
index 0000000..39603cb
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (c) 2015 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    """Add num_dependent_backups column to backups."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    num_dependent_backups = Column('num_dependent_backups', Integer, default=0)
+    backups.create_column(num_dependent_backups)
+    backups_list = list(backups.select().execute())
+    for backup in backups_list:
+        dep_bks_list = list(backups.select().where(backups.columns.parent_id ==
+                                                   backup.id).execute())
+        if dep_bks_list:
+            backups.update().where(backups.columns.id == backup.id).values(
+                num_dependent_backups=len(dep_bks_list)).execute()
+
+
+def downgrade(migrate_engine):
+    """Remove num_dependent_backups column to backups."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    num_dependent_backups = backups.columns.num_dependent_backups
+
+    backups.drop_column(num_dependent_backups)
index f2bdc27d8691fdea2eb79725dcc0a15b6a92dc25..6236d7ec549d2f149f8053ebdf7e66fe8afa634f 100644 (file)
@@ -540,6 +540,7 @@ class Backup(BASE, CinderBase):
     object_count = Column(Integer)
     temp_volume_id = Column(String(36))
     temp_snapshot_id = Column(String(36))
+    num_dependent_backups = Column(Integer)
 
     @validates('fail_reason')
     def validate_fail_reason(self, key, fail_reason):
index 2ba3cd1a1cb009561d680cedb385b66bd4e96191..5a20c7840b8a5a44bca75d168761d9096f3f62f5 100644 (file)
@@ -36,7 +36,9 @@ LOG = logging.getLogger(__name__)
 class Backup(base.CinderPersistentObject, base.CinderObject,
              base.CinderObjectDictCompat):
     # Version 1.0: Initial version
-    VERSION = '1.0'
+    # Version 1.1: Add new field num_dependent_backups and extra fields
+    #              is_incremental and has_dependent_backups.
+    VERSION = '1.1'
 
     fields = {
         'id': fields.UUIDField(),
@@ -65,14 +67,23 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
 
         'temp_volume_id': fields.StringField(nullable=True),
         'temp_snapshot_id': fields.StringField(nullable=True),
+        'num_dependent_backups': fields.IntegerField(),
     }
 
-    obj_extra_fields = ['name']
+    obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']
 
     @property
     def name(self):
         return CONF.backup_name_template % self.id
 
+    @property
+    def is_incremental(self):
+        return bool(self.parent_id)
+
+    @property
+    def has_dependent_backups(self):
+        return bool(self.num_dependent_backups)
+
     def obj_make_compatible(self, primitive, target_version):
         """Make an object representation compatible with a target version."""
         super(Backup, self).obj_make_compatible(primitive, target_version)
index 65415db3c58392ab679f3af2514dbfaac46812f9..9ed7e9a98b03ae418a9a563f28d0728ab7f7ee00 100644 (file)
@@ -57,7 +57,8 @@ class BackupsAPITestCase(test.TestCase):
                        snapshot=False,
                        incremental=False,
                        parent_id=None,
-                       size=0, object_count=0, host='testhost'):
+                       size=0, object_count=0, host='testhost',
+                       num_dependent_backups=0):
         """Create a backup object."""
         backup = {}
         backup['volume_id'] = volume_id
@@ -75,6 +76,7 @@ class BackupsAPITestCase(test.TestCase):
         backup['snapshot'] = snapshot
         backup['incremental'] = incremental
         backup['parent_id'] = parent_id
+        backup['num_dependent_backups'] = num_dependent_backups
         return db.backup_create(context.get_admin_context(), backup)['id']
 
     @staticmethod
@@ -104,6 +106,8 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(0, res_dict['backup']['size'])
         self.assertEqual('creating', res_dict['backup']['status'])
         self.assertEqual(volume_id, res_dict['backup']['volume_id'])
+        self.assertFalse(res_dict['backup']['is_incremental'])
+        self.assertFalse(res_dict['backup']['has_dependent_backups'])
 
         db.backup_destroy(context.get_admin_context(), backup_id)
         db.volume_destroy(context.get_admin_context(), volume_id)
@@ -207,7 +211,7 @@ class BackupsAPITestCase(test.TestCase):
         res_dict = json.loads(res.body)
 
         self.assertEqual(200, res.status_int)
-        self.assertEqual(12, len(res_dict['backups'][0]))
+        self.assertEqual(14, len(res_dict['backups'][0]))
         self.assertEqual('az1', res_dict['backups'][0]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][0]['container'])
@@ -221,7 +225,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('creating', res_dict['backups'][0]['status'])
         self.assertEqual('1', res_dict['backups'][0]['volume_id'])
 
-        self.assertEqual(12, len(res_dict['backups'][1]))
+        self.assertEqual(14, len(res_dict['backups'][1]))
         self.assertEqual('az1', res_dict['backups'][1]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][1]['container'])
@@ -235,7 +239,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('creating', res_dict['backups'][1]['status'])
         self.assertEqual('1', res_dict['backups'][1]['volume_id'])
 
-        self.assertEqual(12, len(res_dict['backups'][2]))
+        self.assertEqual(14, len(res_dict['backups'][2]))
         self.assertEqual('az1', res_dict['backups'][2]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][2]['container'])
@@ -1643,3 +1647,52 @@ class BackupsAPITestCase(test.TestCase):
         backup = self.backup_api.get(self.context, backup_id)
         self.assertRaises(exception.NotSupportedOperation,
                           self.backup_api.delete, self.context, backup, True)
+
+    def test_show_incremental_backup(self):
+        volume_id = utils.create_volume(self.context, size=5)['id']
+        parent_backup_id = self._create_backup(volume_id, status="available",
+                                               num_dependent_backups=1)
+        backup_id = self._create_backup(volume_id, status="available",
+                                        incremental=True,
+                                        parent_id=parent_backup_id,
+                                        num_dependent_backups=1)
+        child_backup_id = self._create_backup(volume_id, status="available",
+                                              incremental=True,
+                                              parent_id=backup_id)
+
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertTrue(res_dict['backup']['is_incremental'])
+        self.assertTrue(res_dict['backup']['has_dependent_backups'])
+
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  parent_backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertFalse(res_dict['backup']['is_incremental'])
+        self.assertTrue(res_dict['backup']['has_dependent_backups'])
+
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  child_backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertTrue(res_dict['backup']['is_incremental'])
+        self.assertFalse(res_dict['backup']['has_dependent_backups'])
+
+        db.backup_destroy(context.get_admin_context(), child_backup_id)
+        db.backup_destroy(context.get_admin_context(), backup_id)
+        db.volume_destroy(context.get_admin_context(), volume_id)
index a3323d2af27491869e89f38d604e2e8251f830b7..892ff5ebefa308acfdeba0eb69f8cd07683c9c3a 100644 (file)
@@ -30,7 +30,8 @@ def fake_db_backup(**updates):
         'display_description': 'fake_description',
         'service_metadata': 'fake_metadata',
         'service': 'fake_service',
-        'object_count': 5
+        'object_count': 5,
+        'num_dependent_backups': 0,
     }
 
     for name, field in objects.Backup.fields.items():
index 4cd393ba7b0981574ac1ac9e3492c63a60cf7267..c5fd15b563e4eec5c7167746acfe4595e37f5296 100644 (file)
@@ -86,7 +86,8 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertEqual('3', backup.temp_snapshot_id)
 
     def test_import_record(self):
-        backup = objects.Backup(context=self.context, id=1)
+        backup = objects.Backup(context=self.context, id=1, parent_id=None,
+                                num_dependent_backups=0)
         export_string = backup.encode_record()
         imported_backup = objects.Backup.decode_record(export_string)
 
@@ -94,7 +95,8 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertDictEqual(dict(backup), imported_backup)
 
     def test_import_record_additional_info(self):
-        backup = objects.Backup(context=self.context, id=1)
+        backup = objects.Backup(context=self.context, id=1, parent_id=None,
+                                num_dependent_backups=0)
         extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}}
         extra_info_copy = extra_info.copy()
         export_string = backup.encode_record(extra_info=extra_info)
@@ -110,7 +112,8 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertDictEqual(expected, imported_backup)
 
     def test_import_record_additional_info_cant_overwrite(self):
-        backup = objects.Backup(context=self.context, id=1)
+        backup = objects.Backup(context=self.context, id=1, parent_id=None,
+                                num_dependent_backups=0)
         export_string = backup.encode_record(id='fake_id')
         imported_backup = objects.Backup.decode_record(export_string)
 
index 52278d71589ba19999de85f817cdd3bf83c1a98c..5db16eb0172c435f13388930bfde05873f860a07 100644 (file)
@@ -758,6 +758,16 @@ class BackupTestCase(BaseBackupTest):
         result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
         self.assertTrue(result)
 
+    def test_backup_has_dependent_backups(self):
+        """Test backup has dependent backups.
+
+        Test the query of has_dependent_backups in backup object is correct.
+        """
+        vol_size = 1
+        vol_id = self._create_volume_db_entry(size=vol_size)
+        backup = self._create_backup_db_entry(volume_id=vol_id)
+        self.assertFalse(backup.has_dependent_backups)
+
 
 class BackupTestCaseWithVerify(BaseBackupTest):
     """Test Case for backups."""
index 06d68d5eda5ac448a59798dd228b1ff4a76d54c1..b65601d5b9492124625ba3f0413d2501902b10a7 100644 (file)
@@ -1723,7 +1723,8 @@ class DBAPIBackupTestCase(BaseTest):
             'size': 1000,
             'object_count': 100,
             'temp_volume_id': 'temp_volume_id',
-            'temp_snapshot_id': 'temp_snapshot_id', }
+            'temp_snapshot_id': 'temp_snapshot_id',
+            'num_dependent_backups': 0, }
         if one:
             return base_values
 
index e5a335f721d6732ba527f6f353f839c05bef7e5c..57946e70a47bf7f017aed0a4d9fa36a40cc38afd 100644 (file)
@@ -883,6 +883,15 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertNotIn('object_current_version', services.c)
         self.assertNotIn('object_available_version', services.c)
 
+    def _check_054(self, engine, data):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertIsInstance(backups.c.num_dependent_backups.type,
+                              sqlalchemy.types.INTEGER)
+
+    def _post_downgrade_054(self, engine):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertNotIn('num_dependent_backups', backups.c)
+
     def test_walk_versions(self):
         self.walk_versions(True, False)
 
index 41b1a0add3f8509b3d26a5b676a838f289b88dbd..6ad3eb1ed391b31ebc064d8cbc33a74882b17b08 100644 (file)
@@ -351,6 +351,33 @@ class NotifyUsageTestCase(test.TestCase):
             'cgsnapshot.test_suffix',
             mock_usage.return_value)
 
+    def test_usage_from_backup(self):
+        raw_backup = {
+            'project_id': '12b0330ec2584a',
+            'user_id': '158cba1b8c2bb6008e',
+            'availability_zone': 'nova',
+            'id': 'fake_id',
+            'host': 'fake_host',
+            'display_name': 'test_backup',
+            'created_at': '2014-12-11T10:10:00',
+            'status': 'available',
+            'volume_id': 'fake_volume_id',
+            'size': 1,
+            'service_metadata': None,
+            'service': 'cinder.backup.drivers.swift',
+            'fail_reason': None,
+            'parent_id': 'fake_parent_id',
+            'num_dependent_backups': 0,
+        }
+
+        # Make it easier to find out differences between raw and expected.
+        expected_backup = raw_backup.copy()
+        expected_backup['tenant_id'] = expected_backup.pop('project_id')
+        expected_backup['backup_id'] = expected_backup.pop('id')
+
+        usage_info = volume_utils._usage_from_backup(raw_backup)
+        self.assertEqual(expected_backup, usage_info)
+
 
 class LVMVolumeDriverTestCase(test.TestCase):
     def test_convert_blocksize_option(self):
index 893cf3bef8b53bcf0f2843be8e9fb94d1f445459..84ce6a4ed247170f290147286dfa8eeeea7298f3 100644 (file)
@@ -88,6 +88,7 @@ def _usage_from_volume(context, volume_ref, **kw):
 
 
 def _usage_from_backup(backup_ref, **kw):
+    num_dependent_backups = backup_ref['num_dependent_backups']
     usage_info = dict(tenant_id=backup_ref['project_id'],
                       user_id=backup_ref['user_id'],
                       availability_zone=backup_ref['availability_zone'],
@@ -100,7 +101,10 @@ def _usage_from_backup(backup_ref, **kw):
                       size=backup_ref['size'],
                       service_metadata=backup_ref['service_metadata'],
                       service=backup_ref['service'],
-                      fail_reason=backup_ref['fail_reason'])
+                      fail_reason=backup_ref['fail_reason'],
+                      parent_id=backup_ref['parent_id'],
+                      num_dependent_backups=num_dependent_backups,
+                      )
 
     usage_info.update(kw)
     return usage_info